JuliaGPU / CUDA.jl

CUDA programming in Julia.
https://juliagpu.org/cuda/
Other
1.21k stars 219 forks source link

This intrinsic must be compiled to be called #611

Closed kadir-gunel closed 3 years ago

kadir-gunel commented 3 years ago

Hello,

I have a Flux model, and when I take the gradients of the model using Zygote I get `ERROR: this intrinsic must be compiled to be called`. The code below was tested only on GPU, not on CPU.

For the mwe please use the following code :

using Flux
using CUDA
using Flux: glorot_uniform
using Statistics: mean

# Disallow scalar indexing on GPU arrays so accidental host-side element access
# fails loudly instead of silently serializing work on the device.
CUDA.allowscalar(false); # disallowing scalar operations on GPU

# Complex-valued encoder: one Conv/BatchNorm chain processes the real part of
# the input, a second (independently initialized) chain processes the
# imaginary part.
mutable struct Enc
    rConv::Chain
    iConv::Chain

    # Build both branches from shared hyperparameters; each call to `branch`
    # creates an independently glorot-initialized chain.
    function Enc(filter, stride, in, out, pad)
        branch() = Chain(
            Conv(filter, in=>out, leakyrelu, init=glorot_uniform, stride=stride, pad=pad),
            BatchNorm(out, relu),
        )
        return new(branch(), branch())
    end

    # Wrap two pre-built chains directly.
    Enc(rConv::Chain, iConv::Chain) = new(rConv, iConv)
end
Flux.@functor Enc

# Apply the encoder to a complex array: route the real and imaginary parts
# through their respective branches, then recombine into a complex array.
function (enc::Enc)(x)
    realOut = enc.rConv(real(x))
    imagOut = enc.iConv(imag(x))
    # The original code reassigned rC before computing iC, so the imaginary
    # component below equals the pre-update real-branch output:
    diffC = realOut - imagOut          # original: rC = rC - iC
    sumC  = diffC + imagOut            # original: iC = rC + iC (uses updated rC)
    return complex.(diffC, sumC)
end

# Inverse multi-frame STFT via windowed overlap-add, entirely on the GPU.
# `spectrogram` layout is (freqbins, numframes, channels, samples) — TODO confirm
# with callers. Returns a 2-D array (time × samples) normalized by the
# accumulated squared-window envelope.
# NOTE(review): `expectedlen` is computed but never used.
function multistft(spectrogram::CuArray{T, 4},
                    framelen::Int=1024,
                    hopsize::Int=div(framelen, 2)) where T <: Complex

    freqbins, numframes, channels, samples = size(spectrogram)
    expectedlen = framelen + (numframes - 1) * hopsize

    # Pad with one zero frame when numframes is odd so the odd/even frame split
    # below produces equal-length halves.
    spectrogram = isodd(numframes) ? hcat(spectrogram, CUDA.zeros(eltype(spectrogram), size(spectrogram, 1), 1, channels, samples)) : spectrogram
    numframes   = isodd(numframes) ? numframes + 1 : numframes  # number of frames may be altered here; it should not affect the original framelen!

    # Hann window (0.5 * (1 - cos(...))) replicated across frames/channels/samples.
    # window  = hanningTensor(framelen, numframes, channels, samples)
    window  = CUDA.ones(Float32, (framelen, numframes, channels, samples)) .* CUDA.CuArray(Float32.(.5 .* (1 .- cos.(2 .* pi .* collect(0:framelen - 1)/(framelen - 1)))))
    # Squared window plus a small bias so the final division cannot hit zero.
    windows = CUDA.fill(Float32(1.0e-8), framelen, numframes, channels, samples) .+ (window.^2)

    # Overlap-add the squared windows: even frames are shifted by one hop
    # relative to odd frames before summing.
    odds   = Flux.flatten(windows[:, 1:2:end, :, :]);
    evens  = Flux.flatten(windows[:, 2:2:end, :, :]);
    winsum = vcat(odds, CUDA.zeros(Float32, hopsize, samples)) .+ vcat(CUDA.zeros(Float32, hopsize, samples), evens);

    # Windowed inverse real FFT of the odd/even frames (transform along dim 1).
    wr_odd  = window[:, 1:2:end, :, :] .* CUDA.CUFFT.irfft(spectrogram[:, 1:2:end, :, :], framelen, 1);
    wr_even = window[:, 2:2:end, :, :] .* CUDA.CUFFT.irfft(spectrogram[:, 2:2:end, :, :], framelen, 1);

    # Overlap-add reconstruction with the same one-hop offset as `winsum`.
    reconstructed = vcat(Flux.flatten(wr_odd), CUDA.zeros(Float32, hopsize, samples)) .+ vcat(CUDA.zeros(Float32, hopsize, samples), Flux.flatten(wr_even))

    # Normalize by the window energy envelope.
    return (reconstructed ./ winsum)
end

# this loss is user-defined
# Weighted SDR loss between prediction ŷ and ground truth y, computed on the
# time-domain signals reconstructed by `multistft`. `x` is the mixture input;
# ϵ guards the denominator against division by zero.
function wsdrLoss(x, ŷ, y; ϵ=1e-8)

    x = x |> multistft
    ŷ = ŷ |> multistft
    y = y |> multistft

    z = x .- y   # true residual (noise)
    ẑ = x .- ŷ   # predicted residual

    # Per-sample energies of target and residual, flattened to vectors.
    nd  = sum(y.^2; dims=1)[:]
    dom = sum(z.^2; dims=1)[:]

    # Broadcast ϵ as a scalar instead of materializing it with CUDA.fill:
    # CUDA.fill is not differentiable through Zygote (its fill! → gpu_call path
    # ends in a ccall, producing "this intrinsic must be compiled to be called"),
    # and the scalar broadcast is numerically identical while allocating nothing.
    aux = nd ./ (nd .+ dom .+ Float32(ϵ))
    # Energy-weighted combination of the signal SDR and the residual SDR.
    wSDR = aux .* sdr(ŷ, y) .+ (1 .- aux) .* sdr(ẑ, z)
    CUDA.mean(wSDR)
end

multiNorm(A; dims) = CUDA.sqrt.(sum(real(A .* conj(A)), dims=dims))

# Negative cosine-similarity SDR term between prediction and target along
# dim 1; ϵ guards against a zero denominator.
function sdr(ypred, ygold; ϵ=1e-8)
    num = sum(ygold .*  ypred, dims=1)
    den = multiNorm(ygold, dims=1) .* multiNorm(ypred, dims=1)
    # Broadcast ϵ as a scalar instead of building an array with CUDA.fill:
    # CUDA.fill is not differentiable through Zygote (it bottoms out in a
    # ccall), while the scalar broadcast is numerically identical and free.
    -(num ./ (den .+ Float32(ϵ)))
end

# Random spectrogram-shaped complex inputs: (freqbins, frames, channels, batch)
# — presumably matching what multistft expects; TODO confirm.
x = CUDA.rand(ComplexF32, 513, 321, 1, 1); # input
y = CUDA.rand(ComplexF32, 513, 321, 1, 1); # output

# creating a dummy model on gpu
encoder = Chain(Enc((1, 1), (1, 1), 1, 1, (0, 0))) |> gpu

#  ŷ = encoder(x);
# the loss function accepts 3 arguments that are input, prediction, and ground truths.

# The forward pass succeeds...
wsdrLoss(x, encoder(x), y) ; # works fine

# ...but differentiating fails: Zygote traces into CUDA.zeros/CUDA.fill inside
# multistft, whose fill! path reaches a ccall it cannot differentiate.
gradient(wsdrLoss, x, encoder(x), y) # returns error : ERROR: this intrinsic must be compiled to be called

When taking the gradients I get :

ERROR: this intrinsic must be compiled to be called
Stacktrace:
 [1] macro expansion at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0 [inlined]
 [2] _pullback(::Zygote.Context, ::Core.IntrinsicFunction, ::String, ::Type{UInt64}, ::Type{Tuple{Ptr{UInt64}}}, ::Ptr{UInt64}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:12
 [3] getindex at ./atomics.jl:358 [inlined]
 [4] _pullback(::Zygote.Context, ::typeof(getindex), ::Base.Threads.Atomic{UInt64}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [5] macro expansion at /opt/.julia/packages/CUDA/YeS8q/lib/utils/call.jl:37 [inlined]
 [6] macro expansion at /opt/.julia/packages/CUDA/YeS8q/lib/cudadrv/libcuda.jl:1641 [inlined]
 [7] macro expansion at /opt/.julia/packages/CUDA/YeS8q/lib/cudadrv/error.jl:102 [inlined]
 [8] cuOccupancyMaxPotentialBlockSize at /opt/.julia/packages/CUDA/YeS8q/lib/utils/call.jl:93 [inlined]
 [9] _pullback(::Zygote.Context, ::typeof(CUDA.cuOccupancyMaxPotentialBlockSize), ::Base.RefValue{Int32}, ::Base.RefValue{Int32}, ::CuFunction, ::Ptr{Nothing}, ::Int64, ::Int64) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [10] #launch_configuration#606 at /opt/.julia/packages/CUDA/YeS8q/lib/cudadrv/occupancy.jl:58 [inlined]
 [11] _pullback(::Zygote.Context, ::CUDA.var"#launch_configuration##kw", ::NamedTuple{(:max_threads,),Tuple{Int64}}, ::typeof(launch_configuration), ::CuFunction) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [12] #launch_heuristic#853 at /opt/.julia/packages/CUDA/YeS8q/src/gpuarrays.jl:26 [inlined]
 [13] adjoint at /opt/.julia/packages/Zygote/xBjHw/src/lib/lib.jl:188 [inlined]
 [14] _pullback at /opt/.julia/packages/ZygoteRules/6nssF/src/adjoint.jl:47 [inlined]
 [15] launch_heuristic at /opt/.julia/packages/CUDA/YeS8q/src/gpuarrays.jl:17 [inlined]
 [16] adjoint at /opt/.julia/packages/Zygote/xBjHw/src/lib/lib.jl:188 [inlined]
 [17] _pullback at /opt/.julia/packages/ZygoteRules/6nssF/src/adjoint.jl:47 [inlined]
 [18] #gpu_call#1 at /opt/.julia/packages/GPUArrays/jhRU7/src/device/execution.jl:61 [inlined]
 [19] _pullback(::Zygote.Context, ::GPUArrays.var"##gpu_call#1", ::CuArray{Complex{Float32},4}, ::Nothing, ::Nothing, ::Nothing, ::Nothing, ::typeof(GPUArrays.gpu_call), ::GPUArrays.var"#4#5", ::CuArray{Complex{Float32},4}, ::Complex{Float32}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [20] adjoint at /opt/.julia/packages/Zygote/xBjHw/src/lib/lib.jl:188 [inlined]
 [21] _pullback at /opt/.julia/packages/ZygoteRules/6nssF/src/adjoint.jl:47 [inlined]
 [22] gpu_call at /opt/.julia/packages/GPUArrays/jhRU7/src/device/execution.jl:46 [inlined]
 [23] fill! at /opt/.julia/packages/GPUArrays/jhRU7/src/host/construction.jl:5 [inlined]
 [24] _pullback(::Zygote.Context, ::typeof(fill!), ::CuArray{Complex{Float32},4}, ::Int64) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [25] zeros at /opt/.julia/packages/CUDA/YeS8q/src/array.jl:348 [inlined]
 [26] _pullback(::Zygote.Context, ::typeof(CUDA.zeros), ::Type{Complex{Float32}}, ::Int64, ::Int64, ::Int64, ::Int64) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [27] multistft at ./REPL[29]:6 [inlined]
 [28] _pullback(::Zygote.Context, ::typeof(multistft), ::CuArray{Complex{Float32},4}, ::Int64, ::Int64) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [29] multistft at ./REPL[29]:4 [inlined] (repeats 2 times)
 [30] |> at ./operators.jl:834 [inlined]
 [31] #wsdrLoss#7 at ./REPL[17]:2 [inlined]
 [32] _pullback(::Zygote.Context, ::var"##wsdrLoss#7", ::Float64, ::typeof(wsdrLoss), ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [33] wsdrLoss at ./REPL[17]:2 [inlined]
 [34] _pullback(::Zygote.Context, ::typeof(wsdrLoss), ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface2.jl:0
 [35] _pullback(::Function, ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface.jl:38
 [36] pullback(::Function, ::CuArray{Complex{Float32},4}, ::CuArray{Complex{Float32},4}, ::Vararg{CuArray{Complex{Float32},4},N} where N) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface.jl:44
 [37] gradient(::Function, ::CuArray{Complex{Float32},4}, ::Vararg{CuArray{Complex{Float32},4},N} where N) at /opt/.julia/packages/Zygote/xBjHw/src/compiler/interface.jl:53
 [38] top-level scope at REPL[31]:1

With the suggestion of @simeonschaub — `Zygote.@adjoint CUDA.zeros(x...) = CUDA.zeros(x...), _ -> map(_ -> nothing, x)` — I managed to get the gradients. And since @simeonschaub told me to open an issue for CUDA.jl, here it is.

kadir-gunel commented 3 years ago

Some info about CUDA :

julia> CUDA.versioninfo()
CUDA toolkit 11.1.1, artifact installation
CUDA driver 11.1.0
NVIDIA driver 455.45.1

Libraries: 
- CUBLAS: 11.3.0
- CURAND: 10.2.2
- CUFFT: 10.3.0
- CUSOLVER: 11.0.1
- CUSPARSE: 11.3.0
- CUPTI: 14.0.0
- NVML: 11.0.0+455.45.1
- CUDNN: 8.0.4 (for CUDA 11.1.0)
- CUTENSOR: 1.2.1 (for CUDA 11.1.0)

Toolchain:
- Julia: 1.5.2
- LLVM: 9.0.1
- PTX ISA support: 3.2, 4.0, 4.1, 4.2, 4.3, 5.0, 6.0, 6.1, 6.3, 6.4
- Device support: sm_35, sm_37, sm_50, sm_52, sm_53, sm_60, sm_61, sm_62, sm_70, sm_72, sm_75

2 devices
maleadt commented 3 years ago

That's a Zygote error, you're probably differentiating code that isn't supported (like the ccall to the CUDA driver here). I recommend you open a Discourse post, and if it turns out to be a bug, an issue on e.g. Flux or Zygote.jl.