mfherbst / ReducedBasis.jl

Reduced basis methods for parametrised eigenproblems
https://mfherbst.github.io/ReducedBasis.jl/stable/
MIT License

Rewrite `compress` for `AffineDecomposition`s with redundant terms #35

Open · pbrehmer opened this issue 1 year ago

pbrehmer commented 1 year ago

In its current form (see PR #34), the function computes the compressions at all non-redundant indices and returns `nothing` for all others. This leads to a `matrixel` matrix of type `Matrix{Union{Nothing, Matrix{ComplexF64}}}`, which is why a `promote_type.(matrixel)` is placed at the end to promote to the common floating-point type:

function compress(ad::AffineDecomposition{<:Matrix,<:Function},
                  basis::RBasis; symmetric_terms=false)
    is_nonredundant(a, b) = (size(ad.terms, 1) > size(ad.terms, 2)) ? ≥(a, b) : ≤(a, b)
    matrixel = map(CartesianIndices(ad.terms)) do idx
        if is_nonredundant(first(idx.I), last(idx.I))  # Upper/lower triangle
            return compress(ad.terms[idx], basis.snapshots)
        end
    end
    for idx in findall(isnothing, matrixel)
        if symmetric_terms
            matrixel[idx] = matrixel[last(idx.I), first(idx.I)]
        else
            matrixel[idx] = compress(ad.terms[idx], basis.snapshots)
        end
    end  # Use "symmetry" to set transposed elements
    matrixel = promote_type.(matrixel)  # Promote to common floating-point type
    rbterms = map(m -> basis.vectors' * m * basis.vectors, matrixel)

    (; rb=AffineDecomposition(rbterms, ad.coefficient_map),
     raw=AffineDecomposition(matrixel, ad.coefficient_map))
end

It would be much nicer to be able to infer the element type directly from the `map` and then set all redundant ("transposed") elements equal to their counterparts. See also https://github.com/mfherbst/ReducedBasis.jl/pull/34#discussion_r1089421137.
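
To make that idea concrete, here is a rough sketch (not a drop-in implementation): the helper name `compress_nonredundant` is made up for illustration, and it assumes that all term compressions share a common type so that `map` infers a concrete element type. Only the raw compression is shown; the reduced-basis projection step is left out.

function compress_nonredundant(ad::AffineDecomposition{<:Matrix,<:Function},
                               basis::RBasis; symmetric_terms=false)
    is_nonredundant(a, b) = (size(ad.terms, 1) > size(ad.terms, 2)) ? ≥(a, b) : ≤(a, b)
    all_idx    = CartesianIndices(ad.terms)
    nonred_idx = filter(idx -> is_nonredundant(first(idx.I), last(idx.I)), all_idx)

    # Map only over the non-redundant indices, so that the element type is
    # inferred directly (e.g. Matrix{ComplexF64}) and no Nothing entries appear
    compressed = map(idx -> compress(ad.terms[idx], basis.snapshots), nonred_idx)

    matrixel = similar(ad.terms, eltype(compressed))
    for (idx, m) in zip(nonred_idx, compressed)
        matrixel[idx] = m
    end
    for idx in all_idx  # Fill redundant ("transposed") elements afterwards
        is_nonredundant(first(idx.I), last(idx.I)) && continue
        matrixel[idx] = symmetric_terms ? matrixel[last(idx.I), first(idx.I)] :
                        compress(ad.terms[idx], basis.snapshots)
    end
    return matrixel
end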

Also, the `symmetric_terms` condition should be reevaluated, see https://github.com/mfherbst/ReducedBasis.jl/pull/34#discussion_r1089421711. In the long run, a generalized wrapper type for the terms, similar to `Symmetric`, might be the better option.
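
For reference, such a wrapper could be as simple as the following sketch; `SymmetricTerms` and its `relation` field are hypothetical names, loosely modeled on `LinearAlgebra.Symmetric`:

# Hypothetical wrapper that stores only one triangle of the term matrix and
# reconstructs the mirrored entries on access via a user-supplied relation
struct SymmetricTerms{T,F}
    terms::Matrix{T}  # only the upper triangle needs to be populated
    relation::F       # e.g. identity, transpose or adjoint
end

Base.size(s::SymmetricTerms) = size(s.terms)
Base.getindex(s::SymmetricTerms, i::Int, j::Int) =
    i ≤ j ? s.terms[i, j] : s.relation(s.terms[j, i])

With something along these lines, `compress` would only need to loop over the stored triangle, and the `symmetric_terms` keyword could be replaced by the choice of `relation`.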

pbrehmer commented 1 year ago

As already mentioned in #42, this part should also be parallelized: the compressions of the different elements of `terms` can all be performed in parallel. As a starting point, we could use:

function compress(ad::AffineDecomposition{<:Matrix}, basis::RBasis; symmetric_terms=false)
    matrixel = similar(ad.terms, Matrix{Number})
    is_nonredundant(a, b) = (size(ad.terms, 1) > size(ad.terms, 2)) ? ≥(a, b) : ≤(a, b)
    Threads.@threads for idx in CartesianIndices(matrixel)
        if is_nonredundant(first(idx.I), last(idx.I))  # Compress only the non-redundant triangle
            matrixel[idx] = compress(ad.terms[idx], basis.snapshots)
        end
    end

    Threads.@threads for idx in filter(x -> !is_nonredundant(first(x.I), last(x.I)), CartesianIndices(matrixel))
        if symmetric_terms
            matrixel[idx] = matrixel[last(idx.I), first(idx.I)]
        else
            matrixel[idx] = compress(ad.terms[idx], basis.snapshots)
        end
    end  # Use "symmetry" to fill redundant elements
    matrixel = promote_type.(matrixel)  # Promote to common floating-point type
    rbterms = map(m -> basis.vectors' * m * basis.vectors, matrixel)

    (; rb=AffineDecomposition(rbterms, ad.coefficients),
     raw=AffineDecomposition(matrixel, ad.coefficients))
end

This still uses the inconvenient `promote_type.(matrixel)`, but at the moment it is unclear to me how to avoid it.
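
One possible way around it (again only a sketch, with the hypothetical name `compress_threaded`): compress a single representative term first, use its type to allocate a concretely typed container, and only then fill the remaining entries in parallel. This assumes that all terms compress to the same concrete type:

function compress_threaded(ad::AffineDecomposition{<:Matrix}, basis::RBasis;
                           symmetric_terms=false)
    is_nonredundant(a, b) = (size(ad.terms, 1) > size(ad.terms, 2)) ? ≥(a, b) : ≤(a, b)
    all_idx    = CartesianIndices(ad.terms)
    nonred_idx = filter(idx -> is_nonredundant(first(idx.I), last(idx.I)), all_idx)
    red_idx    = setdiff(all_idx, nonred_idx)

    # Compress one representative term to fix the element type of the container
    first_el = compress(ad.terms[first(nonred_idx)], basis.snapshots)
    matrixel = similar(ad.terms, typeof(first_el))
    matrixel[first(nonred_idx)] = first_el

    Threads.@threads for idx in nonred_idx[2:end]
        matrixel[idx] = compress(ad.terms[idx], basis.snapshots)
    end
    Threads.@threads for idx in red_idx  # Use "symmetry" to fill redundant elements
        matrixel[idx] = symmetric_terms ? matrixel[last(idx.I), first(idx.I)] :
                        compress(ad.terms[idx], basis.snapshots)
    end

    rbterms = map(m -> basis.vectors' * m * basis.vectors, matrixel)
    (; rb=AffineDecomposition(rbterms, ad.coefficients),
     raw=AffineDecomposition(matrixel, ad.coefficients))
end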