pyg-team / pytorch_geometric

Graph Neural Network Library for PyTorch
https://pyg.org
MIT License

many CI failures due to `RuntimeError: eigen accepts only contiguous tensors` #8541

Closed · puririshi98 closed this issue 11 months ago

puririshi98 commented 11 months ago

πŸ› Describe the bug

FAILED test/data/test_edge_index.py::test_spspmm[directed--sum-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/data/test_edge_index.py::test_spspmm[directed--add-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/data/test_edge_index.py::test_spspmm[undirected--sum-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/data/test_edge_index.py::test_spspmm[undirected--add-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/data/test_edge_index.py::test_spspmm[undirected-transpose-sum-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/data/test_edge_index.py::test_spspmm[undirected-transpose-add-cpu] - RuntimeError: eigen accepts only contiguous tensors
FAILED test/nn/pool/test_asap.py::test_asap - RuntimeError: eigen accepts only contiguous tensors
FAILED test/transforms/test_two_hop.py::test_two_hop - RuntimeError: eigen accepts only contiguous tensors
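
The failing cases can be reproduced in isolation with plain pytest node-id selection (assuming a source checkout; selecting the bare `test_spspmm` name picks up all of its parametrizations):

pytest test/data/test_edge_index.py::test_spspmm test/nn/pool/test_asap.py::test_asap test/transforms/test_two_hop.py::test_two_hop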

Versions

Latest stack as of this morning.

rusty1s commented 11 months ago

Can you post the full stack trace? I am not sure which tensors are non-contiguous here, so the full trace would help me track down the issue.

puririshi98 commented 11 months ago

________________________ test_spspmm[directed--sum-cpu] ________________________
device = device(type='cpu'), reduce = 'sum', transpose = False
is_undirected = False
    @withCUDA
    @pytest.mark.parametrize('reduce', ReduceType.__args__)
    @pytest.mark.parametrize('transpose', TRANSPOSE)
    @pytest.mark.parametrize('is_undirected', IS_UNDIRECTED)
    def test_spspmm(device, reduce, transpose, is_undirected):
        if is_undirected:
            kwargs = dict(device=device, sort_order='row', is_undirected=True)
            adj1 = EdgeIndex([[0, 1, 1, 2], [1, 0, 2, 1]], **kwargs)
        else:
            kwargs = dict(device=device, sort_order='row')
            adj1 = EdgeIndex([[0, 1, 1, 2], [2, 0, 1, 2]], **kwargs)

        adj1_dense = adj1.to_dense().t() if transpose else adj1.to_dense()
        adj2 = EdgeIndex([[1, 0, 2, 1], [0, 1, 1, 2]], sort_order='col',
                         device=device)
        adj2_dense = adj2.to_dense()

        if reduce in ['sum', 'add']:
>           out, value = adj1.matmul(adj2, reduce=reduce, transpose=transpose)
test/data/test_edge_index.py:791: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/local/lib/python3.10/dist-packages/torch_geometric/data/edge_index.py:765: in matmul
    return matmul(self, other, input_value, other_value, reduce, transpose)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
input = tensor(crow_indices=tensor([0, 1, 3, 4]),
       col_indices=tensor([2, 0, 1, 2]),
       values=tensor([1., 1., 1., 1.]), size=(3, 3), nnz=4,
       layout=torch.sparse_csr)
other = tensor(ccol_indices=tensor([0, 1, 3, 4]),
       row_indices=tensor([1, 0, 2, 1]),
       values=tensor([1., 1., 1., 1.]), size=(3, 3), nnz=4,
       layout=torch.sparse_csc)
input_value = None, other_value = None, reduce = 'sum', transpose = False
    def matmul(
        input: EdgeIndex,
        other: Union[Tensor, EdgeIndex],
        input_value: Optional[Tensor] = None,
        other_value: Optional[Tensor] = None,
        reduce: ReduceType = 'sum',
        transpose: bool = False,
    ) -> Union[Tensor, Tuple[EdgeIndex, Tensor]]:
        if reduce not in ReduceType.__args__:
            raise NotImplementedError(f"`reduce='{reduce}'` not yet supported")

        if not isinstance(other, EdgeIndex):
            if other_value is not None:
                raise ValueError("'other_value' not supported for sparse-dense "
                                 "matrix multiplication")
            return _spmm(input, other, input_value, reduce, transpose)

        if reduce not in ['sum', 'add']:
            raise NotImplementedError(f"`reduce='{reduce}'` not yet supported for "
                                      f"sparse-sparse matrix multiplication")

        transpose &= not input.is_undirected or input_value is not None

        if torch_geometric.typing.WITH_WINDOWS:  # pragma: no cover
            input = input.to_sparse_coo(input_value)
        elif input.is_sorted_by_col:
            input = input.to_sparse_csc(input_value)
        else:
            input = input.to_sparse_csr(input_value)

        if transpose:
            input = input.t()

        if torch_geometric.typing.WITH_WINDOWS:  # pragma: no cover
            other = other.to_sparse_coo(input_value)
        elif other.is_sorted_by_col:
            other = other.to_sparse_csc(other_value)
        else:
            other = other.to_sparse_csr(other_value)

>       out = torch.matmul(input, other)
E       RuntimeError: eigen accepts only contiguous tensors
/usr/local/lib/python3.10/dist-packages/torch_geometric/data/edge_index.py:1273: RuntimeError
[The remaining five test_spspmm failures (directed--add-cpu, undirected--sum-cpu, undirected--add-cpu, undirected-transpose-sum-cpu, undirected-transpose-add-cpu) show the identical traceback: each fails at the same `out = torch.matmul(input, other)` call in torch_geometric/data/edge_index.py:1273 with `RuntimeError: eigen accepts only contiguous tensors`, with `input` in CSR layout and `other` in CSC layout. Only the parametrized values differ; in the transpose variants the inner frame still shows `transpose = False`, since `matmul` masks `transpose` for undirected inputs without explicit values.]
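
For reference, the failing frame reduces to roughly the standalone snippet below (index/value tensors copied from the `input`/`other` reprs above; the variable names are mine). On a stack where the CPU sparse-sparse matmul falls back to Eigen, multiplying a CSR operand by a CSC operand raises this error; other builds may not reproduce it:

import torch

# CSR operand, copied from the `input` repr in the trace above.
a = torch.sparse_csr_tensor(
    torch.tensor([0, 1, 3, 4]),  # crow_indices
    torch.tensor([2, 0, 1, 2]),  # col_indices
    torch.ones(4), size=(3, 3))

# CSC operand, copied from the `other` repr.
b = torch.sparse_csc_tensor(
    torch.tensor([0, 1, 3, 4]),  # ccol_indices
    torch.tensor([1, 0, 2, 1]),  # row_indices
    torch.ones(4), size=(3, 3))

out = a @ b  # RuntimeError: eigen accepts only contiguous tensors
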
__________________________________ test_asap ___________________________________
    @onlyLinux  # TODO  (matthias) Investigate CSR @ CSR support on Windows.
    def test_asap():
        in_channels = 16
        edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                                   [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]])
        num_nodes = edge_index.max().item() + 1
        x = torch.randn((num_nodes, in_channels))

        for GNN in [GraphConv, GCNConv]:
            pool = ASAPooling(in_channels, ratio=0.5, GNN=GNN,
                              add_self_loops=False)
            assert str(pool) == ('ASAPooling(16, ratio=0.5)')
>           out = pool(x, edge_index)
test/nn/pool/test_asap.py:28: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py:1510: in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py:1519: in _call_impl
    return forward_call(*args, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
self = ASAPooling(16, ratio=0.5)
x = tensor([[-0.0009, -0.0015, -0.0009,  0.0011,  0.0041,  0.0013, -0.0012, -0.0023,
          0.0003, -0.0016, -0.0004,  ...0023,
          0.0003, -0.0016, -0.0004,  0.0030, -0.0005, -0.0011, -0.0053,  0.0015]],
       grad_fn=<MulBackward0>)
edge_index = tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 1, 2, 3],
        [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 0, 1, 2, 3]])
edge_weight = None, batch = tensor([0, 0])
    def forward(
        self,
        x: Tensor,
        edge_index: Tensor,
        edge_weight: Optional[Tensor] = None,
        batch: Optional[Tensor] = None,
    ) -> Tuple[Tensor, Tensor, Optional[Tensor], Tensor, Tensor]:
        r"""Forward pass.

        Args:
            x (torch.Tensor): The node feature matrix.
            edge_index (torch.Tensor): The edge indices.
            edge_weight (torch.Tensor, optional): The edge weights.
                (default: :obj:`None`)
            batch (torch.Tensor, optional): The batch vector
                :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns
                each node to a specific example. (default: :obj:`None`)

        Return types:
            * **x** (*torch.Tensor*): The pooled node embeddings.
            * **edge_index** (*torch.Tensor*): The coarsened edge indices.
            * **edge_weight** (*torch.Tensor, optional*): The coarsened edge
              weights.
            * **batch** (*torch.Tensor*): The coarsened batch vector.
            * **index** (*torch.Tensor*): The top-:math:`k` node indices of
              nodes which are kept after pooling.
        """
        N = x.size(0)

        edge_index, edge_weight = add_remaining_self_loops(
            edge_index, edge_weight, fill_value=1., num_nodes=N)

        if batch is None:
            batch = edge_index.new_zeros(x.size(0))

        x = x.unsqueeze(-1) if x.dim() == 1 else x

        x_pool = x
        if self.gnn_intra_cluster is not None:
            x_pool = self.gnn_intra_cluster(x=x, edge_index=edge_index,
                                            edge_weight=edge_weight)

        x_pool_j = x_pool[edge_index[0]]
        x_q = scatter(x_pool_j, edge_index[1], dim=0, reduce='max')
        x_q = self.lin(x_q)[edge_index[1]]

        score = self.att(torch.cat([x_q, x_pool_j], dim=-1)).view(-1)
        score = F.leaky_relu(score, self.negative_slope)
        score = softmax(score, edge_index[1], num_nodes=N)

        # Sample attention coefficients stochastically.
        score = F.dropout(score, p=self.dropout, training=self.training)

        v_j = x[edge_index[0]] * score.view(-1, 1)
        x = scatter(v_j, edge_index[1], dim=0, reduce='sum')

        # Cluster selection.
        fitness = self.gnn_score(x, edge_index).sigmoid().view(-1)
        perm = self.select(fitness, batch).node_index
        x = x[perm] * fitness[perm].view(-1, 1)
        batch = batch[perm]

        # Graph coarsening.
        A = to_torch_csr_tensor(edge_index, edge_weight, size=(N, N))
        S = to_torch_coo_tensor(edge_index, score, size=(N, N))
        S = S.index_select(1, perm).to_sparse_csr()
>       A = S.t().to_sparse_csr() @ (A @ S)
E       RuntimeError: eigen accepts only contiguous tensors
/usr/local/lib/python3.10/dist-packages/torch_geometric/nn/pool/asap.py:151: RuntimeError
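
Not the project's fix, just a sketch: assuming the non-contiguity comes from the transposed/index-selected CSR operands built in asap.py above, keeping the whole coarsening product in COO sidesteps the Eigen CSR/CSC kernel (hypothetical rewrite of the failing lines, possibly slower):

# Hypothetical workaround, not the actual fix: build both operands as COO
# and coalesce after each transform, so the CPU sparse-sparse matmul takes
# the COO path instead of the Eigen CSR/CSC kernel.
A = to_torch_coo_tensor(edge_index, edge_weight, size=(N, N))
S = to_torch_coo_tensor(edge_index, score, size=(N, N))
S = S.index_select(1, perm).coalesce()
A = S.t().coalesce() @ (A @ S)
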
_________________________________ test_two_hop _________________________________
    def test_two_hop():
        transform = TwoHop()
        assert str(transform) == 'TwoHop()'

        edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
        edge_attr = torch.tensor([1, 2, 3, 1, 2, 3], dtype=torch.float)
        data = Data(edge_index=edge_index, edge_attr=edge_attr, num_nodes=4)

>       data = transform(data)
test/transforms/test_two_hop.py:15: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/usr/local/lib/python3.10/dist-packages/torch_geometric/transforms/base_transform.py:32: in __call__
    return self.forward(copy.copy(data))
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
self = TwoHop(), data = Data(edge_index=[2, 6], edge_attr=[6], num_nodes=4)
    def forward(self, data: Data) -> Data:
        edge_index, edge_attr = data.edge_index, data.edge_attr
        N = data.num_nodes

        if torch_geometric.typing.WITH_WINDOWS:
            adj = to_torch_coo_tensor(edge_index, size=(N, N))
        else:
            adj = to_torch_csr_tensor(edge_index, size=(N, N))

>       adj = adj @ adj
E       RuntimeError: eigen accepts only contiguous tensors
/usr/local/lib/python3.10/dist-packages/torch_geometric/transforms/two_hop.py:30: RuntimeError
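
The two_hop failure looks like the same kernel limitation. Since the WITH_WINDOWS branch above already routes `adj @ adj` through COO, a plausible stopgap (assuming nothing downstream depends on the CSR layout) is to take that branch unconditionally:

# Stopgap sketch mirroring the existing WITH_WINDOWS fallback in two_hop.py:
# COO @ COO avoids the Eigen-backed CSR kernel that rejects these tensors.
adj = to_torch_coo_tensor(edge_index, size=(N, N))
adj = adj @ adj
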
=============================== warnings summary ===============================
../../../usr/local/lib/python3.10/dist-packages/torch_geometric/graphgym/config.py:19
  /usr/local/lib/python3.10/dist-packages/torch_geometric/graphgym/config.py:19: UserWarning: Could not define global config object. Please install 'yacs' via 'pip install yacs' in order to use GraphGym
    warnings.warn("Could not define global config object. Please install "
../../../usr/local/lib/python3.10/dist-packages/torch_geometric/graphgym/imports.py:14
  /usr/local/lib/python3.10/dist-packages/torch_geometric/graphgym/imports.py:14: UserWarning: Please install 'pytorch_lightning' via  'pip install pytorch_lightning' in order to use GraphGym
    warnings.warn("Please install 'pytorch_lightning' via  "
test/data/test_edge_index.py::test_spmm[directed--amin-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[directed-transpose-amin-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected--amin-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected-transpose-amin-enable_extensions-cuda:0]
  /usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py:19: UserWarning: The usage of `scatter(reduce='amin')` can be accelerated via the 'torch-scatter' package, but it was not found
    warnings.warn(message)
test/data/test_edge_index.py::test_spmm[directed--amax-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[directed-transpose-amax-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected--amax-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected-transpose-amax-enable_extensions-cuda:0]
  /usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py:19: UserWarning: The usage of `scatter(reduce='amax')` can be accelerated via the 'torch-scatter' package, but it was not found
    warnings.warn(message)
test/data/test_edge_index.py::test_spmm[directed--min-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[directed-transpose-min-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected--min-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected-transpose-min-enable_extensions-cuda:0]
test/utils/test_scatter.py::test_scatter_backward[min-cuda:0]
  /usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py:19: UserWarning: The usage of `scatter(reduce='min')` can be accelerated via the 'torch-scatter' package, but it was not found
    warnings.warn(message)
test/data/test_edge_index.py::test_spmm[directed--max-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[directed-transpose-max-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected--max-enable_extensions-cuda:0]
test/data/test_edge_index.py::test_spmm[undirected-transpose-max-enable_extensions-cuda:0]
test/utils/test_scatter.py::test_scatter_backward[max-cuda:0]
  /usr/local/lib/python3.10/dist-packages/torch_geometric/warnings.py:19: UserWarning: The usage of `scatter(reduce='max')` can be accelerated via the 'torch-scatter' package, but it was not found
    warnings.warn(message)
test/nn/conv/cugraph/test_cugraph_gat_conv.py: 24 warnings
test/nn/conv/cugraph/test_cugraph_rgcn_conv.py: 72 warnings
test/nn/conv/cugraph/test_cugraph_sage_conv.py: 64 warnings
  /usr/local/lib/python3.10/dist-packages/pylibcugraphops/pytorch/graph.py:57: UserWarning: dst_max_in_degree currently has no effect
    warnings.warn("dst_max_in_degree currently has no effect")
test/nn/conv/cugraph/test_cugraph_gat_conv.py: 24 warnings
test/nn/conv/cugraph/test_cugraph_sage_conv.py: 64 warnings
  /usr/local/lib/python3.10/dist-packages/pylibcugraphops/pytorch/graph.py:250: DeprecationWarning: SampledCSC is deprecated with the 23.08 release and will be removed in 23.10, use CSC instead.
    warnings.warn(
test/nn/conv/cugraph/test_cugraph_gat_conv.py: 24 warnings
test/nn/conv/cugraph/test_cugraph_sage_conv.py: 64 warnings
  /usr/local/lib/python3.10/dist-packages/pylibcugraphops/pytorch/graph.py:275: DeprecationWarning: StaticCSC is deprecated with the 23.08 release and will be removed in 23.10, use CSC instead.
    warnings.warn(
test/nn/conv/cugraph/test_cugraph_rgcn_conv.py: 72 warnings
  /usr/local/lib/python3.10/dist-packages/pylibcugraphops/pytorch/graph.py:302: DeprecationWarning: SampledHeteroCSC is deprecated with the 23.08 release and will be removed in 23.10, use HeteroCSC instead.
    warnings.warn(
test/nn/conv/cugraph/test_cugraph_rgcn_conv.py: 72 warnings
  /usr/local/lib/python3.10/dist-packages/pylibcugraphops/pytorch/graph.py:330: DeprecationWarning: StaticHeteroCSC is deprecated with the 23.08 release and will be removed in 23.10, use HeteroCSC instead.
    warnings.warn(
test/nn/dense/test_linear.py::test_hetero_linear[cpu]
test/nn/dense/test_linear.py::test_hetero_linear[cuda:0]
  /usr/local/lib/python3.10/dist-packages/torch/jit/_check.py:178: UserWarning: The TorchScript type system doesn't support instance-level annotations on empty non-base types in `__init__`. Instead, either 1) use a type annotation in the class body, or 2) wrap the type in `torch.jit.Attribute`.
    warnings.warn(
test/transforms/test_generate_mesh_normals.py::test_generate_mesh_normals
  /usr/local/lib/python3.10/dist-packages/torch_geometric/transforms/generate_mesh_normals.py:20: UserWarning: Using torch.cross without specifying the dim arg is deprecated.
  Please either pass the dim explicitly or simply use torch.linalg.cross.
  The default value of dim will change to agree with that of linalg.cross in a future release. (Triggered internally at /opt/pytorch/pytorch/aten/src/ATen/native/Cross.cpp:63.)
    face_norm = F.normalize(vec1.cross(vec2), p=2, dim=-1)  # [F, 3]
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
---------- coverage: platform linux, python 3.10.12-final-0 ----------
Coverage XML written to file coverage.xml
=========================== short test summary info ============================
(same eight FAILED lines as listed at the top of this issue, all raising `RuntimeError: eigen accepts only contiguous tensors`)
= 8 failed, 5775 passed, 570 skipped, 2 deselected, 503 warnings in 200.98s (0:03:20) =