odow / SDDP.jl

A JuMP extension for Stochastic Dual Dynamic Programming
https://sddp.dev

Markovian uncertainty with different conditional probabilities per stage #402

Closed bonnkleiford closed 3 years ago

bonnkleiford commented 3 years ago

Hello!

I have a (relatively large) problem with Markovian uncertainty. I have the conditional probabilities per stage in a JSON file. I tried writing a parser, but MarkovianGraph won't accept the result because it is a Vector{Any} and not a Vector{Matrix{Float64}}. The two look the same when printed, but converting a plain Any array to a matrix is not well documented.

I'd like to ask whether you already have a parser for large problems, or whether you have any idea how to work around this?

odow commented 3 years ago

You can convert a Vector{Any} using convert(Vector{Float64}, x), or with broadcasting: Float64.(x).

For example: https://github.com/odow/MilkPOWDER/blob/16eb5873d6cbce230ed7e6c9484a95752e46fe97/POWDER.jl#L25-L29
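
Something along these lines should also work for reading the matrices straight from JSON (untested; the filename, the assumed file layout, and the to_matrix helper are just placeholders):

```julia
using JSON, SDDP

# Assumed layout: the file stores one transition matrix per stage as a list of
# rows, e.g. [[[0.5, 0.5]], [[0.3, 0.7], [0.6, 0.4]]].
data = JSON.parsefile("probabilities.json")  # Vector{Any} of nested arrays

# Convert one stage's list of rows into a Matrix{Float64}.
to_matrix(rows) = permutedims(reduce(hcat, [Float64.(r) for r in rows]))

transition_matrices = [to_matrix(rows) for rows in data]  # Vector{Matrix{Float64}}
graph = SDDP.MarkovianGraph(transition_matrices)
```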

odow commented 3 years ago

Closing because this seems resolved. There isn't anything for us to do here.

bonnkleiford commented 3 years ago

Hi, Oscar!

My apologies for the late response. I was away for a while and just got back.

Anyway, I was able to do the conversion successfully and got it working. Thanks for the help.

Also, I have another problem. Hehe. I am trying to solve a two-stage problem. If I solve it with SDDP.LinearPolicyGraph, it works and gives a good solution. See the code below:

```julia
using SDDP, GLPK, Distributions
# PARAMETERS
T = 2
I = 50
J = 30
#Investment cost
c = rand(Uniform(5,20), I)
#Operating cost
f = rand(Uniform(10,50), I, J)

#Budget limit
b = 100000
#Min installed capacity
m = 12000

model = SDDP.LinearPolicyGraph(
    stages = 2,
    sense = :Min,
    lower_bound = 0,
    optimizer = GLPK.Optimizer
) do subproblem, stage
    # Define the state variable.
    @variable(subproblem, 0 <= x[i=1:I], SDDP.State, initial_value = 0)
    @variables(subproblem, begin
        0 <= y[i=1:I,j=1:J]
        DEMAND
        end)    

    # Define the constraints
    if stage==2
        @constraints(subproblem, begin
                [i=1:I], sum(y[i,j] for j in 1:J) <= x[i].in
                [j=1:J], sum(y[i,j] for i in 1:I) >= DEMAND

        end)
        @stageobjective(subproblem, sum(f[i,j]*y[i,j] for i in 1:I for j in 1:J))
        SDDP.parameterize(subproblem, [4, 4, 3, 3, 3, 2, 2, 2, 1, 1],[0.1 for i in 1:10]) do ω
            JuMP.fix(DEMAND, ω)
        end
    else
        @constraints(subproblem, begin
                                sum(x[i].out for i in 1:I) >= m
                                sum(c[i]*x[i].out for i in 1:I) <= b
                                end)
        @stageobjective(subproblem, sum(c[i]*x[i].out for i in 1:I))
    end
end
```

I tried using SDDP.PolicyGraph with SDDP.MarkovianGraph but the problem becomes infeasible.

```julia
using SDDP, GLPK, Distributions
I = 50
J = 30
#Investment cost
c = rand(Uniform(5,20), I)
#Operating cost
f = rand(Uniform(10,50), I, J)

#Budget limit
b = 100000
#Min installed capacity
m = 12000

DEMAND = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    [4.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0]]

tr = [
    [0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1],
    [0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1;
     0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1],]

model = SDDP.PolicyGraph(
    SDDP.MarkovianGraph(tr),
    bellman_function = SDDP.BellmanFunction(upper_bound = 10000.0),
    optimizer = GLPK.Optimizer,
) do subproblem, index
    (stage, markov_state) = index
    @variable(subproblem, 0 <= x[i=1:I], SDDP.State, initial_value = 0)
    @variables(subproblem, begin
        0 <= y[i=1:I,j=1:J]
        end)    
    if stage==2
        @constraints(subproblem, begin
                [i=1:I], sum(y[i,j] for j in 1:J) <= x[i].in
                [j=1:J], sum(y[i,j] for i in 1:I) >= DEMAND[stage][markov_state]

        end)
        @stageobjective(subproblem, sum(f[i,j]*y[i,j] for i in 1:I for j in 1:J))
    else
        @constraints(subproblem, begin
                                sum(x[i].out for i in 1:I) >= m
                                sum(c[i]*x[i].out for i in 1:I) <= b
                                end)
        @stageobjective(subproblem, sum(c[i]*x[i].out for i in 1:I))
    end
end
```

I'm not sure whether I'm doing anything wrong here.

odow commented 3 years ago

Wrap your code in triple backticks to format it, not a single one.

```julia
code
```

Why have an upper bound when minimizing in the Markovian case? For a minimization, the cost-to-go function needs a valid lower bound. Presumably you want
```julia
model = SDDP.PolicyGraph(
    SDDP.MarkovianGraph([fill(0.1, 1, 10), fill(0.1, 10, 10)]),
    lower_bound = 0.0,
    optimizer = GLPK.Optimizer,
) do subproblem, index
```
bonnkleiford commented 3 years ago

I think it still works, but it takes a moment. But yes, I'll definitely use triple backticks.

Oh, damn. This is a classic clumsiness.

Thank you very much! 😊

odow commented 3 years ago

I haven't tested it, but here is how I would write it:

```julia
using SDDP, GLPK, Distributions

T = 2
I = 50
J = 30
c = rand(Uniform(5,20), I)
f = rand(Uniform(10,50), I, J)
b = 100000
m = 12000

DEMAND = [4.0, 4.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0]

model = SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    optimizer = GLPK.Optimizer,
) do sp, stage
    @variable(sp, x[1:I] >= 0, SDDP.State, initial_value = 0)
    @variable(sp, y[1:I, 1:J] >= 0)
    if stage == 1
        @constraint(sp, sum(x[i].out for i in 1:I) >= m)
        @constraint(sp, sum(c[i] * x[i].out for i in 1:I) <= b)
        @stageobjective(sp, sum(c[i] * x[i].out for i in 1:I))
    else
        @constraint(sp, [i=1:I], sum(y[i, :]) <= x[i].in)
        @constraint(sp, demand[j=1:J], sum(y[:,j]) >= 0)
        @stageobjective(sp, sum(f[i, j] * y[i, j] for i in 1:I for j in 1:J))
        SDDP.parameterize(sp, DEMAND) do ω
            return set_normalized_rhs.(demand, ω)
        end
    end
end

model = SDDP.PolicyGraph(
    SDDP.MarkovianGraph([fill(0.1, 1, 10), fill(0.1, 10, 10)]),
    lower_bound = 0.0,
    optimizer = GLPK.Optimizer,
) do sp, node
    (stage, i) = node
    @variable(sp, x[1:I] >= 0, SDDP.State, initial_value = 0)
    @variable(sp, y[1:I, 1:J] >= 0)
    if stage == 1
        @constraint(sp, sum(x[i].out for i in 1:I) >= m)
        @constraint(sp, sum(c[i] * x[i].out for i in 1:I) <= b)
        @stageobjective(sp, sum(c[i] * x[i].out for i in 1:I))
    else
        @constraint(sp, [i=1:I], sum(y[i, :]) <= x[i].in)
        @constraint(sp, [j=1:J], sum(y[:, j]) >= DEMAND[i])
        @stageobjective(sp, sum(f[i, j] * y[i, j] for i in 1:I for j in 1:J))
    end
end
```
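
And to train and sanity-check either model, something like this (again untested; the iteration limit and number of replications are arbitrary choices):

```julia
# Train the policy (arbitrary iteration limit) and report the lower bound.
SDDP.train(model; iteration_limit = 50)
println("Lower bound: ", SDDP.calculate_bound(model))

# Simulate 10 replications and compare the average cost with the bound.
simulations = SDDP.simulate(model, 10)
costs = [sum(node[:stage_objective] for node in sim) for sim in simulations]
println("Average simulated cost: ", sum(costs) / length(costs))
```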