roflmaostc opened this issue 1 year ago
Splitting it into two functions could remove the boxing:
```julia
# In a separate Pluto cell; the packages this code relies on.
# (For the GPU path, Tullio also needs KernelAbstractions loaded,
# and on older versions CUDAKernels as well.)
using Tullio, Interpolations, Adapt, CUDA

# ╔═╡ 633d1ddc-131d-465a-ac8c-c382c972dc31
function radon_tullio(I, θs, zs)
    sinogram = similar(I, length(zs), length(θs))
    fill!(sinogram, 0)
    midpoint = size(I, 1) ÷ 2 + 1
    # `I_int` gets a different concrete type in each branch, so it is
    # type-unstable here; the function barrier `t` below hides that.
    if I isa CuArray
        I_2 = linear_interpolation((1:size(I, 1), 1:size(I, 2)), I)
        I_int = adapt(CuArray{Float32}, I_2)
        @show "cuda"  # debug marker
    else
        I_int = linear_interpolation((1:size(I, 1), 1:size(I, 2)), I)
    end
    t(θs, zs, sinogram, midpoint, I_int)  # function barrier
    return sinogram ./ maximum(sinogram)
end

# ╔═╡ 4e0f0aa8-614d-4bc3-bc93-94ba1235b1d7
function t(θs, zs, sinogram, midpoint, I_int)
    sθ = sin.(θs)
    cθ = cos.(θs)
    # `z` appears only on the right-hand side, so Tullio sums over it:
    # each sinogram entry integrates `I_int` along one ray.
    @tullio sinogram[is, iθ] = @inbounds(begin
        x = zs[z] * sθ[iθ] + zs[is] * cθ[iθ] + midpoint
        y = -zs[z] * cθ[iθ] + zs[is] * sθ[iθ] + midpoint
        I_int(y, x)
    end)
end
```
So it works now, but performance is no better than the threaded nested for loops on the CPU.
Is a 200×200 array with ranges of length 200 too small?
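Quite possibly, yes: at 200×200 the per-call overhead (kernel launches on the GPU, task spawning on the CPU) can dominate the actual work. One way to check is to benchmark across sizes; a rough sketch, where the input construction is my assumption and `zs` is kept narrow enough that every sampled point stays inside the interpolation domain:

```julia
using BenchmarkTools

for N in (200, 1000, 4000)
    I  = rand(Float32, N, N)
    θs = range(0f0, Float32(π); length = N)        # projection angles
    zs = range(-N/4f0 + 1, N/4f0 - 1; length = N)  # ray/detector coordinates
    println("N = $N:")
    @btime radon_tullio($I, $θs, $zs)
end
```

If the gap to the CPU version closes (or flips) as `N` grows, the 200×200 case was simply too small to amortize the overhead.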
Hi,
See the Pluto code above. It's an issue with Interpolations; I checked that. Custom CUDA kernels do work with Interpolations, though.
The CPU version runs fine (but not faster than three nested threaded for loops, unfortunately; a sketch of that baseline is below).
Any idea what's going on? I was hoping to avoid writing a custom CUDA kernel. I'm tagging @maleadt since he suggested I use Tullio :laughing:
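For reference, the threaded nested-loop baseline mentioned above looks roughly like this. This is a sketch with the same geometry as the `@tullio` kernel, not the exact original code; `radon_loops` is a hypothetical name, and it assumes `using Interpolations` and a Julia session started with multiple threads:

```julia
function radon_loops(I, θs, zs)
    sinogram = zeros(eltype(I), length(zs), length(θs))
    midpoint = size(I, 1) ÷ 2 + 1
    I_int = linear_interpolation((1:size(I, 1), 1:size(I, 2)), I)
    Threads.@threads for iθ in eachindex(θs)   # one thread per angle
        s, c = sincos(θs[iθ])
        for is in eachindex(zs)
            acc = zero(eltype(I))
            for z in eachindex(zs)              # integrate along the ray
                x = zs[z] * s + zs[is] * c + midpoint
                y = -zs[z] * c + zs[is] * s + midpoint
                acc += I_int(y, x)              # assumes (y, x) stays in bounds
            end
            sinogram[is, iθ] = acc
        end
    end
    return sinogram ./ maximum(sinogram)
end
```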