gridap / GridapDistributed.jl

Parallel distributed-memory version of Gridap
MIT License
103 stars 15 forks source link

PSparseMatrix * PVector fails for certain case #118

Closed zjwegert closed 1 year ago

zjwegert commented 1 year ago

Hi all,

The following fails with an `AssertionError: A check failed` error:

using Gridap
using GridapDistributed
using PartitionedArrays

function main_ex1(parts)
    domain = (0,1,0,1)
    mesh_partition = (4,4)
    model = CartesianDiscreteModel(parts,domain,mesh_partition)
    order = 2
    u((x,y)) = (x+y)^order
    f(x) = -Δ(u,x)
    reffe = ReferenceFE(lagrangian,Float64,order)
    V = TestFESpace(model,reffe,dirichlet_tags="boundary")
    U = TrialFESpace(u,V)
    Ω = Triangulation(model)
    dΩ = Measure(Ω,2*order)
    a(u,v) = ∫( ∇(v)⋅∇(u) )dΩ
    l(v) = ∫( v*f )dΩ
    op = AffineFEOperator(a,l,U,V)
    uh = solve(op)

    K = op.op.matrix;
    U = get_free_dof_values(uh)
    K*U
    # dot(U,K*U) 
end

partition = (2,2)
with_backend(main_ex1, SequentialBackend(), partition)

This outputs:

ERROR: AssertionError: A check failed
Stacktrace:
 [1] macro expansion
   @ C:\Users\Zac\.julia\packages\PartitionedArrays\mKknH\src\Helpers.jl:59 [inlined]
 [2] mul!(c::PVector{Float64, SequentialData{Vector{Float64}, 2}, PRange{SequentialData{IndexRange, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}}, a::PSparseMatrix{Float64, SequentialData{SparseMatrixCSC{Float64, Int64}, 2}, PRange{SequentialData{IndexRange, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}, PRange{SequentialData{IndexRange, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int64}, 2}}}, b::PVector{Float64, SequentialData{Vector{Float64}, 2}, PRange{SequentialData{IndexSet, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}}, α::Bool, β::Bool)
   @ PartitionedArrays C:\Users\Zac\.julia\packages\PartitionedArrays\mKknH\src\Interfaces.jl:2403
 [3] mul!
   @ C:\Users\Zac\AppData\Local\Programs\Julia-1.8.5\share\julia\stdlib\v1.8\LinearAlgebra\src\matmul.jl:276 [inlined]
 [4] *(a::PSparseMatrix{Float64, SequentialData{SparseMatrixCSC{Float64, Int64}, 2}, PRange{SequentialData{IndexRange, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}, PRange{SequentialData{IndexRange, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int64}, 2}}}, 
b::PVector{Float64, SequentialData{Vector{Float64}, 2}, PRange{SequentialData{IndexSet, 2}, Exchanger{SequentialData{Vector{Int32}, 2}, SequentialData{Table{Int32}, 2}}, Nothing}})
   @ PartitionedArrays C:\Users\Zac\.julia\packages\PartitionedArrays\mKknH\src\Interfaces.jl:2774
 [5] main_ex1(parts::SequentialData{Int64, 2})
   @ Main c:\Users\Zac\Desktop\zacs-phd\scripts\TopOpt_Thermo_Distributed\bug.jl:24
 [6] with_backend(::typeof(main_ex1), ::SequentialBackend, ::Tuple{Int64, Int64}; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
   @ PartitionedArrays C:\Users\Zac\.julia\packages\PartitionedArrays\mKknH\src\Interfaces.jl:65
 [7] with_backend(::Function, ::SequentialBackend, ::Tuple{Int64, Int64})
   @ PartitionedArrays C:\Users\Zac\.julia\packages\PartitionedArrays\mKknH\src\Interfaces.jl:63
 [8] top-level scope
   @ c:\Users\Zac\Desktop\zacs-phd\scripts\TopOpt_Thermo_Distributed\bug.jl:29

If you instead do U = op.op.matrix\op.op.vector, K*U works as expected.

Version info:

[f9701e48] GridapDistributed v0.2.7
[5a9dfac6] PartitionedArrays v0.2.15

+

Julia Version 1.8.5
Commit 17cfb8e65e (2023-01-08 06:45 UTC)
Platform Info:
  OS: Windows (x86_64-w64-mingw32)
  CPU: 20 × 12th Gen Intel(R) Core(TM) i7-12700KF
  WORD_SIZE: 64
  LIBM: libopenlibm
  LLVM: libLLVM-13.0.1 (ORCJIT, goldmont)
  Threads: 20 on 20 virtual cores
Environment:
  JULIA_EDITOR = code
  JULIA_NUM_THREADS = 20
JordiManyer commented 1 year ago

It's expected behaviour: your ghost indices are not correct. The ghost layout for the matrix columns is given by A.cols, which is what you should use to allocate your PVector.

Try allocating x = PVector(0.0, A.cols)

If you need the dof values, just copy the owned part over to your new PVector x, and exchange the ghosts using exchange!.

zjwegert commented 1 year ago

That's exactly what I needed. Thank you!