How to fix the "unsupported operand type(s)" TypeError?
I am writing a meta-heuristic (PSO) that uses Gurobi for the optimization step, and I am stuck on this error: TypeError: unsupported operand type(s) for -: 'list' and 'list'. I can't find what is wrong with it.
Here is the code:
import random
from turtle import color
from matplotlib import pyplot as plt
import numpy as np
import gurobipy as grb
import time
# Fix Random Seed
random.seed(10)
# Function Returning Objective Value
def COVID_19_Program(Logistic_Function_Parameters):
    K = Logistic_Function_Parameters[0][0]
    Q = Logistic_Function_Parameters[0][1]
    Lambda = Logistic_Function_Parameters[0][2]
    v = Logistic_Function_Parameters[0][3]
    COVID_19 = grb.Model(name = 'COVID-19 Optimization Model')
    # Data:
    # Ĉ
    C = [0, 7, 16, 23, 33, 42, 49, 55, 59, 63, 68, 72, 77, 81, 87, 90, 95, 100, 106, 112, 115, 119, 123, 127, 129, 132, 136, 140, 143, 145, 149, 152, 156, 160, 163, 166, 170, 174, 179, 183, 187, 192, 197, 201, 206, 210, 215, 218, 222, 227, 231, 234, 238, 243, 247, 252, 257, 260, 265, 271, 277, 282, 287, 291, 296, 301, 305, 311, 316, 322, 328, 333, 340, 344, 350, 355, 362, 368, 373, 379, 386]
    # Number of Days
    Days = len(C) - 1
    # τ
    Taomin = 11
    Taomax = 27
    # ζ
    Rmin = 10
    Rmode = 28
    Rmax = 40
    # Data Sets
    set_Time = range(1, Days + 1)
    set_Tao = range(1, Taomax + 1)
    set_Recovery = range(1, Rmax + 1)
    # Model
    # I(t) Equations Number (1 & 2)
    def I(t, K, Q, Lambda, v):
        return (K / (1 + Q * np.exp(-Lambda * t)) ** (1 / v))
    # N(t) Equations Number (3 & 4)
    def N(t, K, Q, Lambda, v):
        if t <= 0:
            return 0
        else:
            return I(t, K, Q, Lambda, v) - I(t - 1, K, Q, Lambda, v)
    # D.V:
    # Error (ε)
    Epsilon = {t: COVID_19.addVar(vtype = grb.GRB.CONTINUOUS, lb = -1000000000, name = 'ε'.format(t)) for t in set_Time}
    # a
    a = {j: COVID_19.addVar(vtype = grb.GRB.CONTINUOUS, lb = 0, name = 'alpha'.format(j)) for j in set_Tao}
    # b
    b = {j: COVID_19.addVar(vtype = grb.GRB.CONTINUOUS, lb = 0, name = 'beta'.format(j)) for j in set_Recovery}
    # Constraints:
    # Constraint (17)
    constraints = {t:
        COVID_19.addConstr(
            lhs = grb.quicksum(a[j]*N(k - j, K, Q, Lambda, v) for j in set_Tao if j >= Taomin for k in set_Time if k <= t),
            sense = grb.GRB.EQUAL,
            rhs = C[t] - Epsilon[t],
            name = 'Constraint_17'.format(t))
        for t in set_Time}
    # Constraint (18)
    constraints = COVID_19.addConstr(lhs = grb.quicksum(a[j] + b[k] for j in set_Tao if j >= Taomin for k in set_Recovery if k >= Rmin), sense = grb.GRB.EQUAL, rhs = 1, name = 'Constraint_18')
    # Constraint (19)
    constraints = {j:
        COVID_19.addConstr(
            lhs = b[j - 1],
            sense = grb.GRB.LESS_EQUAL,
            rhs = b[j],
            name = 'Constraint_19'.format(j))
        for j in set_Recovery if j >= (Rmin + 1) and j <= Rmode}
    # Constraint (20)
    constraints = {j:
        COVID_19.addConstr(
            lhs = b[j],
            sense = grb.GRB.GREATER_EQUAL,
            rhs = b[j + 1],
            name = 'Constraint_20'.format(j))
        for j in set_Recovery if j >= Rmode and j <= (Rmax - 1)}
    O_F = grb.quicksum(Epsilon[t]*Epsilon[t] for t in set_Time)
    COVID_19.setObjective((O_F))
    COVID_19.optimize()
    Objective_F_Value = COVID_19.ObjVal
    return Objective_F_Value
# K Values Generator
def K_Value_Randomizer():
return random.randint(1000000, 35000000)
# Q Values Generator
def Q_Value_Randomizer():
return random.randint(1, 10000)
# Lambda Values Generator
def Lambda_Value_Randomizer():
return random.random()
# nu Values Generator
def v_Value_Randomizer():
return random.uniform(0.0001, 2)
# Generalized Logistic Function Bounds [K, Q, Lambda, nu]
Parameter_Bounds = [(1000000, 35000000), (1, 10000), (0, 1), (0.0001, 2)]
Number_of_Variables = 4 # K, Q, Lambda, nu
# PSO Parameters
Swarm_Size = 20 # Number of Particles in the Swarm
Iterations = 20 # Max Number of Iterations
w = 0.75 # Inertia Weight
c1 = 1 # Cognitive Factor
c2 = 2 # Social Factor
# Value for initial fitness
initial_fitness = float('inf')
# Best global position and fitness
global_best_particle_position = []
fitness_global_best_particle_position = initial_fitness
# Swarm
Swarm = []
# Visualization
fig = plt.figure()
ax = fig.add_subplot()
fig.show()
# Particle Class
class Particle:
    def __init__(self, Parameter_Bounds):
        self.particle_position = []
        self.particle_velocity = []
        self.local_best_particle_position = []
        self.fitness_particle_position = initial_fitness # Value of current particle position after calculating the objective function
        self.fitness_local_best_particle_position = initial_fitness # initial value of the particle's best position after calculating the objective function
        # Both lines 168 & 169 are used to fill the fitness values with an initial value
        # Fill Particle (K, Q, Lambda, nu) (Generate Random Initial (Position & Velocity))
        Parameter_List = [K_Value_Randomizer(), Q_Value_Randomizer(), Lambda_Value_Randomizer(), v_Value_Randomizer()]
        self.particle_position.append(Parameter_List)
        self.particle_velocity.append(Parameter_List)
    def evaluate(self, COVID_19_Program):
        self.fitness_particle_position = COVID_19_Program(self.particle_position)
        if self.fitness_particle_position < self.fitness_local_best_particle_position:
            self.local_best_particle_position = self.particle_position # Update the Position
            self.fitness_local_best_particle_position = self.fitness_particle_position # Update the Fitness
    def update_velocity(self, global_best_particle_position):
        for i in range(Number_of_Variables):
            r1 = random.random()
            r2 = random.random()
            Cognitive_Velocity = c1 * r1 * (self.local_best_particle_position[i] - self.particle_position[i])
            Social_Velocity = c2 * r2 * (global_best_particle_position[i] - self.particle_position[i])
            self.particle_velocity[i] = w * self.particle_velocity[i] + Cognitive_Velocity + Social_Velocity
    def update_position(self, Parameter_Bounds):
        for i in range(Number_of_Variables):
            self.particle_position[i] = self.particle_position[i] + self.particle_velocity[i]
            # Bounding
            if self.particle_position[i] > Parameter_Bounds[i][1]:
                self.particle_position[i] = Parameter_Bounds[i][1]
            if self.particle_position[i] < Parameter_Bounds[i][0]:
                self.particle_position[i] = Parameter_Bounds[i][0]
# !@#$%^&*()
A = []
for i in range(Swarm_Size):
    Swarm.append(Particle(Parameter_Bounds))
for i in range(Iterations):
    for j in range(Swarm_Size):
        Swarm[j].evaluate(COVID_19_Program)
        if Swarm[j].fitness_particle_position < fitness_global_best_particle_position:
            global_best_particle_position = list(Swarm[j].particle_position)
            fitness_global_best_particle_position = float(Swarm[j].fitness_particle_position)
    for j in range(Swarm_Size):
        Swarm[j].update_velocity(global_best_particle_position)
        Swarm[j].update_position(Parameter_Bounds)
    A.append(fitness_global_best_particle_position) # Record the best fitness
    # Visualization
    ax.plot(A, color = 'r')
    fig.canvas.draw()
    ax.set_xlim(left = max(0, i - Iterations), right = i + 3)
print('Optimal Solution: ', global_best_particle_position)
print('Objective Function Value: ', fitness_global_best_particle_position)
plt.show()
The traceback says it has something to do with the Cognitive_Velocity variable, but I fail to see why that is a problem.
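For reference, the exception itself comes from plain Python: lists do not support the binary - operator, while NumPy arrays do. Below is a minimal, self-contained sketch that only reproduces the same message outside the PSO code; the names pos and best are invented for the illustration and are not part of the program above.

import numpy as np

# Two plain Python lists, shaped like the four-parameter vectors [K, Q, Lambda, nu]
pos = [2.0, 3.0, 0.5, 1.0]
best = [1.0, 4.0, 0.25, 0.9]

try:
    best - pos  # TypeError: unsupported operand type(s) for -: 'list' and 'list'
except TypeError as exc:
    print(exc)

# NumPy arrays subtract elementwise, so converting first avoids the error
print(np.array(best) - np.array(pos))

In the posted code, __init__ appends the whole Parameter_List to self.particle_position, so self.particle_position[0] (and the local_best_particle_position copied from it in evaluate) is itself a four-element list; the subtraction inside Cognitive_Velocity then ends up being exactly the list-minus-list case shown above, which matches the reported error.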