mcabbott / Tullio.jl

MIT License
615 stars 29 forks source link

Zygote with Tullio gives wrong gradients/pullbacks using CUDA #185

Open kc-111 opened 8 months ago

kc-111 commented 8 months ago
using Tullio, Zygote, CUDA, KernelAbstractions, OMEinsum

# Show outer product of strings
# Build the 4×4 table of pairwise concatenations: res[i, j] == string(A[i], A[j]).
A = ["x", "y", "z", "w"]
res = [string(left, right) for left in A, right in A]
display(res)

# Test outer product using einstein summation
A = rand(length(A), 100) # Last dim is batch
# Batched outer product: C[i,j,k] = A[i,k] * B[j,k] (one outer product per batch slice k).
# NOTE(review): this @tullio kernel is the operation whose CUDA pullback is reported
# wrong — keep it exactly as-is to preserve the bug reproduction.
batchmul(A, B) = @tullio C[i,j,k] := A[i,k] * B[j,k]
# Equivalent OMEinsum formulation (reported to give correct CUDA gradients):
# batchmul(A, B) = ein"ik,jk->ijk"(A, B)
# Flatten the (i, j) outer-product dims into one, keeping the batch dim last.
outer_prod(A, B) = reshape(batchmul(A, B), size(A, 1)*size(B, 1), size(A, 2))
# Sanity check: reshaping the flattened result back recovers the batched product.
@show reshape(outer_prod(A, A), 4, 4, :) == batchmul(A, A)
# CPU gradient of sum(outer_prod(p, p)) — serves as the reference result below.
(loss,), back = pullback(p -> sum(outer_prod(p, p)), A)
gs = back((one(loss)))[1]
display(gs)

# Cuda
# Repeat the same gradient computation on the GPU. Per the report, this
# disagrees with the CPU gradient above — the suspected Tullio+CUDA pullback bug.
A_cu = CuArray(Float32.(A))
(loss,), back = pullback(p -> sum(outer_prod(p, p)), A_cu)
gs = back((one(loss)))[1]
display(gs)

Using OMEinsum with CUDA gives consistent and correct results. The problem: with Tullio, the pullback gives different results on CUDA than on the CPU. Discourse discussion: https://discourse.julialang.org/t/zygote-with-tullio-gives-wrong-gradients-pullbacks-using-cuda/110767

mcabbott commented 8 months ago

I can reproduce this, seems to be a bug. Thanks for the report! Slightly shorter version below:

julia> using Tullio, Zygote, CUDA, KernelAbstractions

julia> batchmul(A, B) = @tullio C[i,j,k] := A[i,k] * B[j,k];

julia> A = rand(Float32, 4, 2); B = rand(Float32, 4, 2);

julia> withgradient((a,b) -> sum(abs2, batchmul(a, b)), A, B)
(val = 5.032481f0, grad = (Float32[0.07354373 1.634892; 2.6971781 3.3456252; 1.6237329 3.0221555; 2.502201 1.6203384], Float32[0.99070925 2.647247; 1.6691209 0.9896178; 0.45123747 2.6089365; 1.6953326 2.0719757]))

julia> withgradient((a,b) -> sum(abs2, batchmul(a, b)), cu(A), cu(B))
(val = 5.0324807f0, grad = (Float32[0.010545072 0.6002646; 0.38673505 1.228375; 0.23281904 1.1096104; 0.35877827 0.59492123], Float32[0.00033122321 0.2761269; 0.00055803615 0.103224255; 0.00015086193 0.27213085; 0.0005667995 0.21612196]))

julia> bcmul(A, B) = reshape(A, size(A,1), 1, :) .* reshape(B, 1, size(B)...);

julia> bcmul(A, B) ≈ batchmul(A, B)
true

julia> withgradient((a,b) -> sum(abs2, bcmul(a, b)), A, B)
(val = 5.032481f0, grad = (Float32[0.07354373 1.6348917; 2.6971781 3.3456252; 1.6237328 3.0221555; 2.502201 1.6203386], Float32[0.99070925 2.647247; 1.6691209 0.9896178; 0.45123744 2.6089365; 1.6953325 2.0719757]))

julia> withgradient((a,b) -> sum(abs2, bcmul(a, b)), cu(A), cu(B))
(val = 5.0324807f0, grad = (Float32[0.07354373 1.6348919; 2.6971781 3.345625; 1.6237328 3.0221558; 2.502201 1.6203384], Float32[0.9907092 2.647247; 1.6691209 0.9896178; 0.45123744 2.6089368; 1.6953325 2.0719757]))

If this is the function you actually need, then I strongly suggest that you write it as bcmul above, using broadcasting rather than fancy packages.