Pyomo: setting up a model inside a while loop

Posted on 2025-01-22 20:43:54


Edit

I changed the constraints to this:

    def constraint_rule1(m, i):
        return sum(m.x[i] for i in m.data_set) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return summation(m.u1, m.u2)

and now I get: AttributeError: 'dict' object has no attribute 'is_expression_type'
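
A note on this error: in Pyomo, `summation` is an alias for `sum_product`, so with two arguments it builds the bilinear expression sum_i u1[i]*u2[i] rather than an elementwise sum; the `AttributeError` about `'dict'` most likely comes from the dicts passed as variable bounds further down, since `Var` expects numbers or a bounds rule there, not a dict. A minimal sketch of a linear L1 objective (mirroring the accepted fix below):

    # summation(m.u1, m.u2) means sum_product: sum_i u1[i]*u2[i] (bilinear).
    # A linear L1 objective sums the two auxiliary variables elementwise:
    def objective_rule(m):
        return sum(m.u1[i] + m.u2[i] for i in m.data_set)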


I am trying to set up a model inside a while loop and I don't understand why it is not working. I am also not sure whether rebuilding the model on every iteration like this is good practice. When I run the code below, I get "'_GeneralVarData' object is not iterable", which I suspect comes from how I formulated the constraints. Could someone also tell me what the best practice is for creating a model several times?
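
For context, `m.x[i]` is a single `_GeneralVarData` object rather than a container, so Python's built-in `sum()` has nothing to iterate over. A minimal self-contained sketch of the usual Pyomo idiom, summing a generator expression over the index set:

    from pyomo.environ import ConcreteModel, Set, Var

    m = ConcreteModel()
    m.data_set = Set(initialize=["A", "B", "C"])
    m.x = Var(m.data_set)

    # sum(m.x["A"]) raises TypeError: a single variable datum is not iterable;
    # sum a generator expression over the index set instead
    expr = sum(m.x[i] for i in m.data_set)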

# imports needed to run this snippet
from dataclasses import dataclass
from typing import Dict

import numpy as np
from pyomo.environ import (
    ConcreteModel, Set, Var, Param, Constraint, Objective,
    SolverFactory, minimize, value,
)
from pyomo.opt import SolverStatus, TerminationCondition

# create random number generator
rng = np.random.default_rng()

dict_1 = {"A": (-400, 100.0), "B": (-50, -10.0), "C": (-100, 100.0)}
dict_2 = {"D": (10.0, 180.0), "E": (0.0, 80.0), "F": (0.0, 200.0), "H": (0.0, 20.0)}

# making a dataclass to hold the outputs
@dataclass
class NominationGenerationOutput:
    result: Dict
    diff: Dict
    diff1: Dict
    diff2: Dict
    node_lb: Dict
    node_ub: Dict


# number of nominations I want to have
max_noms_count = 10
count = 0
# maximum number of total iterations
iterCount = 0
#maximum iteration count
maxIter = 100
assert max_noms_count <= maxIter

while count != max_noms_count and iterCount < maxIter:
    # get uniformly distributed sample for each sink/source
    dict_A = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_1.items()}
    dict_B = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_2.items()}

    # data preparation
    lb1 = {key: val[0] for key, val in dict_1.items()}
    ub1 = {key: val[1] for key, val in dict_1.items()}

    lb2 = {key: val[0] for key, val in dict_2.items()}
    ub2 = {key: val[1] for key, val in dict_2.items()}

    data_dict = {**dict_A, **dict_B}
    dict_lb = {**lb1, **lb2}
    dict_ub = {**ub1, **ub2}

    # create optimization problem
    m = ConcreteModel()

    #Set
    m.data_set = Set(initialize=list(data_dict.keys()))
    
    # nomination vector
    m.x = Var(m.data_set, bounds=(dict_lb, dict_ub))

    # to represent an absolute value term in the objective, we need 2 auxiliary variables
    m.u1 = Var(
        m.data_set,
        bounds=({key: 0 for key in m.data_set}, None),
    )
    m.u2 = Var(
        m.data_set,
        bounds=({key: 0 for key in m.data_set}, None),
    )

    #Parameter
    m.data_param = Param(m.data_set, initialize=data_dict)

    def constraint_rule1(m, i):
        return sum(m.x[i]) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return (sum(m.u1[i]) + sum(m.u2[i]) for i in m.data_set)

    # add balance constraint
    m.constraint1 = Constraint(m.data_set, rule=constraint_rule1)
    m.constraint2 = Constraint(m.data_set, rule=constraint_rule2)
    # set objective as the sum of absolute distances from each variable
    # to its random candidate and minimize that difference
    m.objective = Objective(rule=objective_rule, sense=minimize)
    results = SolverFactory("gurobi").solve(m, tee=False)
    # output handling
    if (results.solver.status == SolverStatus.ok) and (
        results.solver.termination_condition == TerminationCondition.optimal
    ):
        print("*" * 80 + "\n")
        print("objective: ", value(m.objective))
        print("*" * 80 + "\n")
        # nomination
        result = {i: m.x[i].value for i in m.x}
        # absolute differences
        diff = {i: m.u1[i].value + m.u2[i].value for i in m.x}
        # positive differences
        diff1 = {i: m.u1[i].value for i in m.x}
        # negative differences
        diff2 = {i: m.u2[i].value for i in m.x}
        output = NominationGenerationOutput(
            result, diff, diff1, diff2, node_lb, node_ub
        )

    print("maximum deviation: ", max(diff.values()))
    print("average deviation: ", sum(diff.values()) / len(diff))
    print("*" * 80 + "\n")
    if sum(diff.values()) / len(diff) < 20:
        noms.append(output)
        count += 1
    iterCount += 1


Comments (1)

菊凝晚露 2025-01-29 20:43:54


In case anyone needs this, I solved the problem by writing it like this:

while count != max_noms_count and iterCount < maxIter:
    # get uniformly distributed sample for each sink/source
    dict_A = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_1.items()}
    dict_B = {key: rng.uniform(low=val[0], high=val[1]) for key, val in dict_2.items()}

    # some data preparation
    lb1 = {key: val[0] for key, val in dict_1.items()}
    ub1 = {key: val[1] for key, val in dict_1.items()}

    lb2 = {key: val[0] for key, val in dict_2.items()}
    ub2 = {key: val[1] for key, val in dict_2.items()}

    data_dict = {**dict_A, **dict_B}
    dict_lb = {**lb1, **lb2}
    dict_ub = {**ub1, **ub2}

    def bounds_rule(m, i):
        return (dict_lb[i], dict_ub[i])

    # create optimization problem
    m = ConcreteModel()

    #Set
    m.data_set = Set(initialize=list(data_dict.keys()))

    # nomination vector
    m.x = Var(m.data_set, bounds=bounds_rule)

    # to represent an absolute value term in the objective, we need 2
    # auxiliary variables: with u1, u2 >= 0 and u1 - u2 == x - d,
    # minimizing u1 + u2 yields u1 + u2 == |x - d| at the optimum
    m.u1 = Var(
        m.data_set,
        bounds=(0, None),
    )
    m.u2 = Var(
        m.data_set,
        bounds=(0, None),
    )

    #Parameter
    m.data_param = Param(m.data_set, initialize=data_dict)

    def constraint_rule1(m, i):
        return sum(m.x[i] for i in m.data_set) == 0

    def constraint_rule2(m, i):
        return m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i]

    def objective_rule(m):
        return sum(m.u1[i] + m.u2[i] for i in m.data_set)

    # add balance constraint
    m.constraint1 = Constraint(m.data_set, rule=constraint_rule1)
    m.constraint2 = Constraint(m.data_set, rule=constraint_rule2)
    # set objective as the sum of absolute distances from each
    # nomination to its random candidate and minimize that difference
    m.objective = Objective(rule=objective_rule, sense=minimize)
    optimiser = SolverFactory("gurobi")

    results = optimiser.solve(m)

    # output handling
    if (results.solver.status == SolverStatus.ok) and (
        results.solver.termination_condition == TerminationCondition.optimal
    ):
        print("*" * 80 + "\n")
        print("objective: ", value(m.objective))
        print("*" * 80 + "\n")
        # nomination
        result = {i: m.x[i].value for i in m.x}
        # absolute differences
        diff = {i: m.u1[i].value + m.u2[i].value for i in m.x}
        # positive differences
        diff1 = {i: m.u1[i].value for i in m.x}
        # negative differences
        diff2 = {i: m.u2[i].value for i in m.x}
        output = NominationGenerationOutput(
            result, diff, diff1, diff2, node_lb, node_ub
        )

    print("maximum deviation: ", max(diff.values()))
    print("average deviation: ", sum(diff.values()) / len(diff))
    print("*" * 80 + "\n")
    if sum(diff.values()) / len(diff) < 20:
        noms.append(output)
        count += 1
    iterCount += 1
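
The key changes compared to the question: `bounds` is given as a rule returning a `(lb, ub)` tuple per index instead of a tuple of dicts, the balance constraint and the objective sum generator expressions over `m.data_set`, and the auxiliary variables get plain `(0, None)` bounds.

The question also asked about best practice for building a model several times. One common alternative, sketched below under the assumption that only the sampled values change between iterations (this sketch is not from the original post), is to construct the model once with a mutable `Param` and overwrite its values before each solve:

    from pyomo.environ import (
        ConcreteModel, Set, Var, Param, Constraint, Objective,
        SolverFactory, minimize,
    )
    import numpy as np

    rng = np.random.default_rng()
    dict_1 = {"A": (-400, 100.0), "B": (-50, -10.0), "C": (-100, 100.0)}
    dict_2 = {"D": (10.0, 180.0), "E": (0.0, 80.0), "F": (0.0, 200.0), "H": (0.0, 20.0)}
    bounds_dict = {**dict_1, **dict_2}

    m = ConcreteModel()
    m.data_set = Set(initialize=list(bounds_dict.keys()))
    # bounds rule: return the (lb, ub) tuple for each index
    m.x = Var(m.data_set, bounds=lambda m, i: bounds_dict[i])
    m.u1 = Var(m.data_set, bounds=(0, None))
    m.u2 = Var(m.data_set, bounds=(0, None))
    # mutable=True lets us overwrite values later without rebuilding the model
    m.data_param = Param(m.data_set, initialize=0.0, mutable=True)

    m.balance = Constraint(expr=sum(m.x[i] for i in m.data_set) == 0)
    m.split = Constraint(
        m.data_set,
        rule=lambda m, i: m.u1[i] - m.u2[i] == m.x[i] - m.data_param[i],
    )
    m.objective = Objective(
        expr=sum(m.u1[i] + m.u2[i] for i in m.data_set), sense=minimize
    )

    solver = SolverFactory("gurobi")
    for _ in range(10):
        # draw a fresh sample and push it into the existing model
        for i in m.data_set:
            lo, hi = bounds_dict[i]
            m.data_param[i] = rng.uniform(low=lo, high=hi)
        results = solver.solve(m)

For a model this small, rebuilding the `ConcreteModel` on every iteration, as above, works fine too; the mutable-`Param` pattern mainly pays off when model construction dominates solve time.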