SciML / DiffEqFlux.jl

Pre-built implicit layer architectures with O(1) backprop, GPUs, and stiff+non-stiff DE solvers, demonstrating scientific machine learning (SciML) and physics-informed machine learning methods
https://docs.sciml.ai/DiffEqFlux/stable
MIT License

MethodError: no method matching Float64(::Tracker.TrackedReal{Float64}) when using Array instead of Vector as parameters #82

Closed. LiborKudela closed this issue 5 years ago.

LiborKudela commented 5 years ago

Hello, first of all I am not sure whether I am posting this in the right place.

I wanted to try using DiffEqFlux.jl to train an ODE system similar to the first example, but with an outside effect (something like a Robin boundary condition). However, when I make p a 2D Array (a Matrix) instead of a Vector (as is done in the example), I get an error. I was not able to find out where it comes from. What am I missing here? Is this expected behavior? Do I need to manually map the indexes in the parameter Vector inside the model function? Are the destructure/restructure tools somehow supposed to deal with this?

Thanks for any reply.
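
To make the question concrete, here is roughly what I mean by manually mapping the indexes: keep p a flat Vector (which the example handles) and index it as a matrix by hand. This is only a sketch of a possible workaround, not something I know to be the intended API:

# Hypothetical workaround: store the 2x2 parameter matrix column-major
# in a flat Vector and rebuild the matrix layout inside the model function.
p = [1.5, 3.0, 1.0, 1.0]  # columns of [1.5 1.0; 3.0 1.0], stacked

function lotka_volterra_flat(du, u, p, t)
    P = reshape(p, 2, 2)  # view the flat Vector as the 2x2 matrix again
    du[1] =  P[1,1]*u[1] - P[1,2]*u[1]*u[2]
    du[2] = -P[2,1]*u[2] + P[2,2]*u[1]*u[2]
end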


# I need to build and train a model that looks something like this:
#
# function model(du,u,p,t)
#   du .= Array_of_parameters*u .+ static_coefficient.*vector_of_parameters.*(u .- u0(t))
# end
#
# (note the in-place `du .=`; a plain `du =` would only rebind the local variable)
# But when I tweak the first DiffEqFlux.jl example to work with an Array
# instead of a Vector, I get an error.

using DifferentialEquations

# original example
#p = [1.5,1.0,3.0,1.0]
#function lotka_volterra(du,u,p,t)
#  x, y = u
#  α, β, δ, γ = p
#  du[1] = dx = α*x - β*x*y
#  du[2] = dy = -δ*y + γ*x*y
#end

# this still works
#p = [1.5,1.0,3.0,1.0]
#function lotka_volterra(du,u,p,t)
#  du[1] = p[1]*u[1] - p[2]*u[1]*u[2]
#  du[2] = -p[3]*u[2] + p[4]*u[1]*u[2]
#end

# this version throws once `p` is replaced by the tracked Array further below:
# MethodError: no method matching Float64(::Tracker.TrackedReal{Float64})
p = [1.5 1.0; 3.0 1.0]
function lotka_volterra(du,u,p,t)
  du[1] = p[1,1]*u[1] - p[1,2]*u[1]*u[2]
  du[2] = -p[2,1]*u[2] + p[2,2]*u[1]*u[2]
end

u0 = [1.0,1.0]
tspan = (0.0,10.0)

prob = ODEProblem(lotka_volterra,u0,tspan,p)
sol = solve(prob,Tsit5())
using Plots
plot(sol)

using Flux, DiffEqFlux
#p = param([2.2,1.0,2.0,0.4]) # Original Initial Parameter Vector
p = param([2.2 1.0;2.0 0.4]) # Tweaked Initial Parameter Array
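# note: `p` is now a TrackedArray, so `p[i,j]` returns a Tracker.TrackedReal;
# writing that into the solver's plain Float64 `du` buffer is what raises the
# MethodError reported below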
params = Flux.Params([p])

function predict_adjoint() # Our 1-layer neural network
  diffeq_adjoint(p,prob,Tsit5(),saveat=0.0:0.1:10.0)
end

loss_adjoint() = sum(abs2,x-1 for x in predict_adjoint())

data = Iterators.repeated((), 100)
opt = ADAM(0.1)
cb = function () #callback function to observe training
  display(loss_adjoint())
  # using `remake` to re-create our `prob` with current parameters `p`
  # display(plot(solve(remake(prob,p=Flux.data(p)),Tsit5(),saveat=0.0:0.1:10.0),ylim=(0,6)))
end

predict_adjoint() # <= MethodError 

# Display the ODE with the initial parameter values.
cb()
Flux.train!(loss_adjoint, params, data, opt, cb = cb)

MethodError: no method matching Float64(::Tracker.TrackedReal{Float64})
Closest candidates are:
  Float64(::Real, !Matched::RoundingMode) where T<:AbstractFloat at rounding.jl:194
  Float64(::T<:Number) where T<:Number at boot.jl:741
  Float64(!Matched::Int8) at float.jl:60
  ...

convert(::Type{Float64}, ::Tracker.TrackedReal{Float64}) at number.jl:7
setindex!(::Array{Float64,1}, ::Tracker.TrackedReal{Float64}, ::Int64) at array.jl:767
lotka_volterra(::Array{Float64,1}, ::Array{Float64,1}, ::TrackedArray{…,Array{Float64,2}}, ::Float64) at NeuralStuff_tweaked_example.jl:23
(::ODEFunction{true,typeof(lotka_volterra),…})(::Array{Float64,1}, ::Array{Float64,1}, ::Vararg{Any,N} where N) at diffeqfunction.jl:230
initialize!(::OrdinaryDiffEq.ODEIntegrator{Tsit5,true,Array{Float64,1},Float64,TrackedArray{…,Array{Float64,2}},…}, ::OrdinaryDiffEq.Tsit5Cache{…}) at low_order_rk_perform_step.jl:623
init#335(…, ::typeof(DiffEqBase.init), ::ODEProblem{Array{Float64,1},Tuple{Float64,Float64},true,TrackedArray{…,Array{Float64,2}},…}, ::Tsit5, …) at solve.jl:356
(::getfield(DiffEqBase, Symbol("#kw##init")))(::NamedTuple{(:saveat,),…}, ::typeof(DiffEqBase.init), …) at none:0
__init at none:0 [inlined]
__init at none:0 [inlined]
__init at none:0 [inlined]
__solve#334 at solve.jl:4 [inlined]
__solve at none:0 [inlined]
solve_call#435(::Base.Iterators.Pairs{Symbol,StepR...
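
The first two frames of the trace pin down the failure: indexing the tracked parameter matrix yields a Tracker.TrackedReal, and setindex! cannot convert it for storage into the solver's plain Float64 du buffer. A minimal reproduction of just that conversion failure (a sketch assuming the Tracker-based Flux of that era, as used above):

using Flux  # Tracker-based Flux, as in the example above

p  = param([1.5 1.0; 3.0 1.0])  # TrackedArray{…,Array{Float64,2}}
du = zeros(2)                   # plain Float64 buffer, like the solver's du
du[1] = p[1,1]                  # p[1,1] is a Tracker.TrackedReal{Float64}
# => MethodError: no method matching Float64(::Tracker.TrackedReal{Float64})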

ChrisRackauckas commented 5 years ago

Thanks for the report and the MWE; it made this really easy to track down. This is fixed in https://github.com/JuliaDiffEq/DiffEqFlux.jl/pull/83.