Alternative forward models

This tutorial was generated using Literate.jl.

This example demonstrates how to train convex and non-convex models.

This example uses the following packages:

using SDDP
import Ipopt
import PowerModels
import Test

Formulation

We build a simple optimal power flow model with a single hydro-electric generator.

The formulation of our optimal power flow problem depends on model_type, which must be one of the PowerModels formulations; for example, PowerModels.DCPPowerModel is a convex DC approximation, and PowerModels.ACPPowerModel is the non-convex AC formulation.

(To run locally, download pglib_opf_case5_pjm.m and update filename appropriately.)
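One possible way to fetch the file is with the Downloads standard library; this is a sketch, and the URL assumes the case file lives at the root of the PGLib-OPF GitHub repository:

import Downloads
Downloads.download(
    # Assumed location of the case file in the PGLib-OPF repository:
    "https://raw.githubusercontent.com/power-grid-lib/pglib-opf/master/pglib_opf_case5_pjm.m",
    joinpath(@__DIR__, "pglib_opf_case5_pjm.m"),
)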

function build_model(model_type)
    filename = joinpath(@__DIR__, "pglib_opf_case5_pjm.m")
    data = PowerModels.parse_file(filename)
    return SDDP.PolicyGraph(
        SDDP.UnicyclicGraph(0.95);
        sense = :Min,
        lower_bound = 0.0,
        optimizer = Ipopt.Optimizer,
    ) do sp, t
        power_model = PowerModels.instantiate_model(
            data,
            model_type,
            PowerModels.build_opf;
            jump_model = sp,
        )
        # Now add hydro power models. Assume that generator 5 is hydro, and the
        # rest are thermal.
        pg = power_model.var[:it][:pm][:nw][0][:pg][5]
        sp[:pg] = pg
        # Add a reservoir state variable and a deficit slack variable. The
        # deficit keeps the water balance feasible in every scenario, and it
        # is heavily penalized in the objective.
        @variable(sp, x >= 0, SDDP.State, initial_value = 10.0)
        @variable(sp, deficit >= 0)
        @constraint(sp, balance, x.out == x.in - pg + deficit)
        @stageobjective(sp, objective_function(sp) + 1e6 * deficit)
        # The random variable ω is the inflow into the reservoir; it enters
        # the model as the right-hand side of the balance constraint.
        SDDP.parameterize(sp, [0, 2, 5]) do ω
            return SDDP.set_normalized_rhs(balance, ω)
        end
        return
    end
end
build_model (generic function with 1 method)

Training a convex model

We can build and train a convex approximation of the optimal power flow problem.

The drawback of the convex model is that it does not accurately capture the true dynamics of the problem. As a result, it under-estimates the true cost of operation.

convex = build_model(PowerModels.DCPPowerModel)
SDDP.train(convex; iteration_limit = 10)
-------------------------------------------------------------------
         SDDP.jl (c) Oscar Dowson and contributors, 2017-23
-------------------------------------------------------------------
problem
  nodes           : 1
  state variables : 1
  scenarios       : Inf
  existing cuts   : false
options
  solver          : serial mode
  risk measure    : SDDP.Expectation()
  sampling scheme : SDDP.InSampleMonteCarlo
subproblem structure
  VariableRef                             : [20, 20]
  AffExpr in MOI.EqualTo{Float64}         : [13, 13]
  AffExpr in MOI.GreaterThan{Float64}     : [6, 6]
  AffExpr in MOI.LessThan{Float64}        : [6, 6]
  VariableRef in MOI.GreaterThan{Float64} : [14, 14]
  VariableRef in MOI.LessThan{Float64}    : [11, 11]
numerical stability report
  matrix range     [1e+00, 2e+02]
  objective range  [1e+00, 1e+06]
  bounds range     [4e-01, 6e+00]
  rhs range        [5e-01, 5e+00]
-------------------------------------------------------------------
 iteration    simulation      bound        time (s)     solves  pid
-------------------------------------------------------------------
         1   1.523310e+07  5.067080e+04  2.255886e+00       263   1
         3   9.421417e+05  3.910824e+05  5.291379e+00       461   1
         6   2.162658e+06  3.961903e+05  1.100551e+01       842   1
         8   1.941006e+06  4.223458e+05  2.081691e+01      1248   1
        10   3.679273e+05  4.299914e+05  2.340025e+01      1338   1
-------------------------------------------------------------------
status         : iteration_limit
total time (s) : 2.340025e+01
total solves   : 1338
best bound     :  4.299914e+05
simulation ci  :  2.301986e+06 ± 2.866614e+06
numeric issues : 0
-------------------------------------------------------------------

To more accurately simulate the dynamics of the problem, a common approach is to write the cuts representing the policy to a file, and then read them into a non-convex model:

SDDP.write_cuts_to_file(convex, "convex.cuts.json")
non_convex = build_model(PowerModels.ACPPowerModel)
SDDP.read_cuts_from_file(non_convex, "convex.cuts.json")

Now we can simulate non_convex to evaluate the policy.

result = SDDP.simulate(non_convex, 1)
1-element Vector{Vector{Dict{Symbol, Any}}}:
 [Dict(:bellman_term => 402656.3530971356, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37541507405, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 397553.22745532525, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415074082, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 392450.1018135147, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415074053, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394071.60017277324, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37541507405, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 395693.0985320318, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375415074057, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 390589.9728902213, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415074042, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 385486.84724841086, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415073995, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382563.0571285107, :noise_term => 5, :node_index => 1, :stage_objective => 19510.165380716066, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 384184.55548014864, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375415073962, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 390289.1365067864, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375415074035, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 391051.9613275786, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415074053, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 392673.45968683704, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375415074057, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398778.04071347474, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375415074046, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 393674.9150716643, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37541507407, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 388571.7894298539, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415074024, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 383468.6637880436, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375415073897, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382563.0571201103, :noise_term => 5, :node_index => 1, :stage_objective => 17739.248003824112, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 388667.6381387252, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375415074028, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 390289.1364979837, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37541507404, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 391910.6348572421, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375415074064, :objective_state => nothing, :belief => Dict(1 => 1.0))]
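As a quick check (a sketch, not part of the generated output above), we can sum the stage objectives of the simulated trajectory to compute the total cost of this single sample path:

simulation = result[1]  # the one trajectory we simulated
total_cost = sum(data[:stage_objective] for data in simulation)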

A problem with writing the cuts to file and reading them back is that the cuts were generated at the trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.

Training a non-convex model

We can also build and train a non-convex formulation of the optimal power flow problem.

The drawback of the non-convex model is that, because it is non-convex, SDDP.jl may find a sub-optimal policy. As a result, it may over-estimate the true cost of operation.

non_convex = build_model(PowerModels.ACPPowerModel)
SDDP.train(non_convex; iteration_limit = 10)
result = SDDP.simulate(non_convex, 1)
1-element Vector{Vector{Dict{Symbol, Any}}}:
 [Dict(:bellman_term => 384044.78133150784, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375264353024, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 379185.374417273, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375264352973, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 380729.43231068406, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37526435301, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382273.49020409526, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375264353013, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 383817.5480975064, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375264353002, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 385361.60599091754, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375264352988, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 391174.6404227592, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375264353024, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 392718.6983161705, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37526435303, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 394262.7562095816, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37526435304, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 400075.7906414233, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375264353068, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 405888.8250732646, :noise_term => 0, :node_index => 1, :stage_objective => 21433.37526435342, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 401029.4181590297, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375264353064, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 406842.4525908691, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375264355192, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 401983.0456766341, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375264353104, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 397123.63876239926, :noise_term => 5, :node_index => 1, :stage_objective => 21433.375264353046, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402936.673194241, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375264353137, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 404480.7310876519, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375264353206, :objective_state => nothing, :belief => Dict(1 => 1.0))]

Combining convex and non-convex models

To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.

As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.

convex = build_model(PowerModels.DCPPowerModel)
A policy graph with 1 nodes.
 Node indices: 1
non_convex = build_model(PowerModels.ACPPowerModel)
A policy graph with 1 nodes.
 Node indices: 1

To do so, we train convex using the SDDP.AlternativeForwardPass forward pass, which simulates the model using non_convex, and we use SDDP.AlternativePostIterationCallback as a post-iteration callback, which copies cuts from the convex model back into the non_convex model.

SDDP.train(
    convex;
    forward_pass = SDDP.AlternativeForwardPass(non_convex),
    post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex),
    iteration_limit = 10,
)
-------------------------------------------------------------------
         SDDP.jl (c) Oscar Dowson and contributors, 2017-23
-------------------------------------------------------------------
problem
  nodes           : 1
  state variables : 1
  scenarios       : Inf
  existing cuts   : false
options
  solver          : serial mode
  risk measure    : SDDP.Expectation()
  sampling scheme : SDDP.InSampleMonteCarlo
subproblem structure
  VariableRef                             : [20, 20]
  AffExpr in MOI.EqualTo{Float64}         : [13, 13]
  AffExpr in MOI.GreaterThan{Float64}     : [6, 6]
  AffExpr in MOI.LessThan{Float64}        : [6, 6]
  VariableRef in MOI.GreaterThan{Float64} : [14, 14]
  VariableRef in MOI.LessThan{Float64}    : [11, 11]
numerical stability report
  matrix range     [1e+00, 2e+02]
  objective range  [1e+00, 1e+06]
  bounds range     [4e-01, 6e+00]
  rhs range        [5e-01, 5e+00]
-------------------------------------------------------------------
 iteration    simulation      bound        time (s)     solves  pid
-------------------------------------------------------------------
         1   7.175851e+06  6.712497e+04  1.375217e+00        96   1
         4   1.176771e+06  9.904408e+04  5.294588e+00       255   1
         5   1.036801e+06  3.408117e+05  9.261329e+00       390   1
         8   1.479110e+06  3.499230e+05  1.452001e+01       627   1
        10   4.561834e+05  3.851470e+05  1.700329e+01       729   1
-------------------------------------------------------------------
status         : iteration_limit
total time (s) : 1.700329e+01
total solves   : 729
best bound     :  3.851470e+05
simulation ci  :  1.730880e+06 ± 1.479845e+06
numeric issues : 0
-------------------------------------------------------------------

In practice, if we were to simulate non_convex now, we should obtain a better policy than the one produced by either of the two previous approaches.
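As a minimal sketch of such an evaluation (not run in this tutorial), we could simulate the combined policy a number of times and average the total cost of each trajectory:

import Statistics
simulations = SDDP.simulate(non_convex, 100)
costs = map(simulations) do simulation
    # Total cost of one trajectory is the sum of its stage objectives.
    return sum(data[:stage_objective] for data in simulation)
end
println("Average simulated cost: ", Statistics.mean(costs))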