Status: Closed — paul-tqh-nguyen closed this issue 3 years ago.
I've attached a notebook demonstrating the issue. See notebook.tar.gz.
This is probably just a subtle issue where I'm writing the MLIR incorrectly, but I figured this issue would be worth mentioning.
Here's a demonstration.
import numpy as np import mlir_graphblas from mlir_graphblas.sparse_utils import MLIRSparseTensor
# Two independent JIT engines: one will receive the CSR and CSC modules
# separately ("good"), the other will receive both concatenated ("bad").
engine_good = mlir_graphblas.MlirJitEngine()
engine_bad = mlir_graphblas.MlirJitEngine()

# Lowering pipeline handed to MlirJitEngine.add: sparsify, bufferize,
# lower linalg to loops, then convert everything down to LLVM dialect.
STANDARD_PASSES = [
    "--sparsification",
    "--sparse-tensor-conversion",
    "--linalg-bufferize",
    "--func-bufferize",
    "--tensor-bufferize",
    "--tensor-constant-bufferize",
    "--finalizing-bufferize",
    "--convert-linalg-to-loops",
    "--convert-scf-to-std",
    "--convert-std-to-llvm",
]
# MLIR module defining @csr_densify2x2: densifies a 2x2 f64 tensor stored
# with a CSR-style encoding (dense outer dim, compressed inner dim, identity
# dimOrdering) via an identity linalg.generic copy into a dense output.
# NOTE(review): the string was flattened onto one line by the issue scraper;
# the original notebook presumably contained newlines — the MLIR content
# itself is unchanged here.
csr_densify_mlir_text = """ #trait_densify_csr = { indexing_maps = [ affine_map<(i,j) -> (i,j)>, affine_map<(i,j) -> (i,j)> ], iterator_types = ["parallel", "parallel"] } #CSR64 = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (i,j)>, pointerBitWidth = 64, indexBitWidth = 64 }> func @csr_densify2x2(%argA: tensor<2x2xf64, #CSR64>) -> tensor<2x2xf64> { %output_storage = constant dense<0.0> : tensor<2x2xf64> %0 = linalg.generic #trait_densify_csr ins(%argA: tensor<2x2xf64, #CSR64>) outs(%output_storage: tensor<2x2xf64>) { ^bb(%A: f64, %x: f64): linalg.yield %A : f64 } -> tensor<2x2xf64> return %0 : tensor<2x2xf64> } """
# MLIR module defining @csc_densify2x2: same identity-copy densification as
# the CSR version, but the encoding and the input indexing map both use the
# transposed dimOrdering affine_map<(i,j) -> (j,i)> (CSC-style storage).
# Both modules share the same #trait_/#encoding naming scheme, which is
# relevant to the concatenation behavior demonstrated below.
csc_densify_mlir_text = """ #trait_densify_csc = { indexing_maps = [ affine_map<(i,j) -> (j,i)>, affine_map<(i,j) -> (i,j)> ], iterator_types = ["parallel", "parallel"] } #CSC64 = #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(i,j) -> (j,i)>, pointerBitWidth = 64, indexBitWidth = 64 }> func @csc_densify2x2(%argA: tensor<2x2xf64, #CSC64>) -> tensor<2x2xf64> { %output_storage = constant dense<0.0> : tensor<2x2xf64> %0 = linalg.generic #trait_densify_csc ins(%argA: tensor<2x2xf64, #CSC64>) outs(%output_storage: tensor<2x2xf64>) { ^bb(%A: f64, %x: f64): linalg.yield %A : f64 } -> tensor<2x2xf64> return %0 : tensor<2x2xf64> } """
# Build a 2x2 sparse matrix with entries (0,1)=50 and (1,0)=100.
# sparsity=[False, True] marks dim 0 dense and dim 1 compressed,
# matching the "dense","compressed" dimLevelType of the MLIR encodings.
indices = np.array(
    [
        [0, 1],
        [1, 0],
    ],
    dtype=np.uint64,
)
values = np.array([50, 100], dtype=np.float64)
sizes = np.array([2, 2], dtype=np.uint64)
# np.bool_ rather than np.bool8: np.bool8 was a deprecated alias of the
# same dtype (deprecated in NumPy 1.24, removed in NumPy 2.0).
sparsity = np.array([False, True], dtype=np.bool_)
a = MLIRSparseTensor(indices, values, sizes, sparsity)
engine_good.add(csr_densify_mlir_text, STANDARD_PASSES)
['csr_densify2x2']
engine_good.add(csc_densify_mlir_text, STANDARD_PASSES)
['csc_densify2x2']
engine_good.csr_densify2x2(a)
array([[ 0., 50.], [100., 0.]])
engine_bad.add(csr_densify_mlir_text+csc_densify_mlir_text, STANDARD_PASSES)
['csr_densify2x2', 'csc_densify2x2']
engine_bad.csr_densify2x2(a)
array([[ 0., 100.], [ 50., 0.]])
This was fixed in https://github.com/metagraph-dev/mlir-graphblas/pull/73!
I've attached a notebook demonstrating the issue. See notebook.tar.gz.
This is probably just a subtle issue where I'm writing the MLIR incorrectly, but I figured this issue would be worth mentioning.
Here's a demonstration.
Working case (presumably refers to engine_good above, where the CSR and CSC modules were added separately — confirm against the original notebook):
Not-working case (presumably refers to engine_bad above, where the two modules were concatenated into a single add() call and csr_densify2x2 returned the transposed result):