Are all packages on their latest versions, on Julia v1.8?
Try with saveat = tspan[1]:1.0f0:tspan[2]? (That's equivalent to saveat = 1.0f0.)
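For concreteness, the suggested call would look something like this (a sketch assuming the tutorial's monteprob and GPUTsit5 setup; on newer DiffEqGPU releases the ensemble algorithm also takes a backend argument, e.g. EnsembleGPUKernel(CUDA.CUDABackend())):

# saveat given as an explicit range of save points rather than a step size
sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(),
            trajectories = 10_000,
            saveat = tspan[1]:1.0f0:tspan[2])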
@utkarsh530 can you change the docs to runnable examples? The docs now build on a machine with GPUs, so we need to make sure all of the doc examples are tested.
I want to add that the EnsembleGPUArray example also fails:
using DiffEqGPU, OrdinaryDiffEq

# Lorenz system, in-place form
function lorenz(du, u, p, t)
    du[1] = p[1] * (u[2] - u[1])
    du[2] = u[1] * (p[2] - u[3]) - u[2]
    du[3] = u[1] * u[2] - p[3] * u[3]
end

u0 = Float32[1.0; 0.0; 0.0]
tspan = (0.0f0, 100.0f0)
p = [10.0f0, 28.0f0, 8 / 3f0]
prob = ODEProblem(lorenz, u0, tspan, p)

# Randomize the parameters for each trajectory in the ensemble
prob_func = (prob, i, repeat) -> remake(prob, p = rand(Float32, 3) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)
sol = solve(monteprob, Tsit5(), EnsembleGPUArray(), trajectories = 10_000, saveat = 1.0f0);
ERROR: CUDA error: an illegal memory access was encountered (code 700, ERROR_ILLEGAL_ADDRESS)
Stacktrace:
[1] throw_api_error(res::CUDA.cudaError_enum)
@ CUDA C:\Users\danie\.julia\packages\CUDA\BbliS\lib\cudadrv\error.jl:89
[2] macro expansion
@ C:\Users\danie\.julia\packages\CUDA\BbliS\lib\cudadrv\error.jl:97 [inlined]
[3] cuMemAllocAsync(dptr::Base.RefValue{CUDA.CuPtr{Nothing}}, bytesize::Int64, hStream::CUDA.CuStream)
@ CUDA C:\Users\danie\.julia\packages\CUDA\BbliS\lib\utils\call.jl:26
[4] #alloc#1
@ C:\Users\danie\.julia\packages\CUDA\BbliS\lib\cudadrv\memory.jl:83 [inlined]
[5] macro expansion
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:43 [inlined]
[6] macro expansion
@ .\timing.jl:382 [inlined]
[7] actual_alloc(bytes::Int64; async::Bool, stream::CUDA.CuStream)
@ CUDA C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:41
[8] macro expansion
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:322 [inlined]
[9] macro expansion
@ .\timing.jl:382 [inlined]
[10] #_alloc#174
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:404 [inlined]
[11] #alloc#173
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:389 [inlined]
[12] alloc
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\pool.jl:383 [inlined]
[13] CUDA.CuArray{Float32, 2, CUDA.Mem.DeviceBuffer}(#unused#::UndefInitializer, dims::Tuple{Int64, Int64})
@ CUDA C:\Users\danie\.julia\packages\CUDA\BbliS\src\array.jl:42
[14] CuArray
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\array.jl:291 [inlined]
[15] CuArray
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\array.jl:296 [inlined]
[16] CuArray
@ C:\Users\danie\.julia\packages\CUDA\BbliS\src\array.jl:305 [inlined]
[17] batch_solve_up(ensembleprob::EnsembleProblem{ODEProblem{true, Vector{Float32}, Tuple{Float32, Float32},…}, var"#15#16", typeof(SciMLBase.DEFAULT_OUTPUT_FUNC), typeof(SciMLBase.DEFAULT_REDUCTION), Nothing}, probs::Vector{ODEProblem{true, Vector{Float32}, Tuple{Float32, Float32},…}}, alg::Tsit5{Static.False,…}, ensemblealg::EnsembleGPUArray, I::UnitRange{Int64}, u0::Matrix{Float32}, p::Matrix{Float32}; kwargs::Base.Pairs{Symbol, Any, Tuple{Symbol, Symbol, Symbol}, NamedTuple{(:adaptive, :unstable_check, :saveat), Tuple{Bool, DiffEqGPU.var"#15#21", Float32}}})
@ DiffEqGPU C:\Users\danie\.julia\packages\DiffEqGPU\2OsE7\src\DiffEqGPU.jl:704
[18] batch_solve(ensembleprob::EnsembleProblem{ODEProblem{true, Vector{Float32}, Tuple{Float32, Float32},…}, var"#15#16", typeof(SciMLBase.DEFAULT_OUTPUT_FUNC), typeof(SciMLBase.DEFAULT_REDUCTION), Nothing}, alg::Tsit5{Static.False,…}, ensemblealg::EnsembleGPUArray, I::UnitRange{Int64}, adaptive::Bool; kwargs::Base.Pairs{Symbol, Any, Tuple{Symbol, Symbol}, NamedTuple{(:unstable_check, :saveat), Tuple{DiffEqGPU.var"#15#21", Float32}}})
@ DiffEqGPU C:\Users\danie\.julia\packages\DiffEqGPU\2OsE7\src\DiffEqGPU.jl:671
[19] macro expansion
@ .\timing.jl:382 [inlined]
[20] __solve(ensembleprob::EnsembleProblem{ODEProblem{true, Vector{Float32}, Tuple{Float32, Float32},…}, var"#15#16", typeof(SciMLBase.DEFAULT_OUTPUT_FUNC), typeof(SciMLBase.DEFAULT_REDUCTION), Nothing}, alg::Tsit5{Static.False,…}, ensemblealg::EnsembleGPUArray; trajectories::Int64, batch_size::Int64, unstable_check::Function, adaptive::Bool, kwargs::Base.Pairs{Symbol, Float32, Tuple{Symbol}, NamedTuple{(:saveat,), Tuple{Float32}}})
@ DiffEqGPU C:\Users\danie\.julia\packages\DiffEqGPU\2OsE7\src\DiffEqGPU.jl:523
[21] #solve#31
@ C:\Users\danie\.julia\packages\DiffEqBase\qPmC2\src\solve.jl:956 [inlined]
[22] top-level scope
@ c:\Users\danie\OneDrive\Dokumente\schule\studium\7. Semester\Spezialisierung\Spezialisierung_code\Pairproduction\test.jl:14
The docs were fully updated and are now fully tested as of https://github.com/SciML/DiffEqGPU.jl/pull/255. This should be fixed. Let me know if you run into anything else.
The basic EnsembleGPUKernel example from https://docs.sciml.ai/DiffEqGPU/stable/tutorials/gpu_ensemble_basic/ fails for me:
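For reference, the example in question is roughly the following (a reconstruction of the linked tutorial rather than a verbatim copy; current DiffEqGPU releases pass the backend explicitly as EnsembleGPUKernel(CUDA.CUDABackend()), while older releases constructed EnsembleGPUKernel() with no argument):

using DiffEqGPU, OrdinaryDiffEq, StaticArrays, CUDA

# Lorenz system, out-of-place form with static arrays so it can compile into a GPU kernel
function lorenz(u, p, t)
    σ = p[1]
    ρ = p[2]
    β = p[3]
    du1 = σ * (u[2] - u[1])
    du2 = u[1] * (ρ - u[3]) - u[2]
    du3 = u[1] * u[2] - β * u[3]
    return SVector{3}(du1, du2, du3)
end

u0 = @SVector [1.0f0, 0.0f0, 0.0f0]
tspan = (0.0f0, 10.0f0)
p = @SVector [10.0f0, 28.0f0, 8 / 3.0f0]
prob = ODEProblem{false}(lorenz, u0, tspan, p)

# Randomize the parameters for each trajectory in the ensemble
prob_func = (prob, i, repeat) -> remake(prob, p = (@SVector rand(Float32, 3)) .* p)
monteprob = EnsembleProblem(prob, prob_func = prob_func, safetycopy = false)

# The saveat keyword is where the reported failure shows up
sol = solve(monteprob, GPUTsit5(), EnsembleGPUKernel(CUDA.CUDABackend()),
            trajectories = 10_000, saveat = 1.0f0)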