Errors happen when graph neural networks run with two inputs
Error:

Traceback (most recent call last):
  File "/home/u106118/openvino/GNN/GAT_openvino.py", line 90, in <module>
    result = compiled_model({input_layer_1.any_name: features, input_layer_2.any_name:adj})[output_layer]
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 384, in __call__
    return self._infer_request.infer(
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 143, in infer
    return OVDict(super().infer(_data_dispatch(
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 354, in _data_dispatch
    return create_shared(inputs, request) if is_shared else create_copied(inputs, request)
  File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
    return dispatch(args[0].__class__)(*args, **kw)
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in _
    return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in <dictcomp>
    return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
  File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
    return dispatch(args[0].__class__)(*args, **kw)
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 82, in _
    return Tensor(value, shared_memory=is_shared)
RuntimeError: Exception from src/bindings/python/src/pyopenvino/core/common.cpp:220:
SHARED MEMORY MODE FOR THIS TENSOR IS NOT APPLICABLE! Passed numpy array must be C contiguous.
Step-by-step reproduction
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
import openvino as ov
from pathlib import Path
import warnings
class GraphAttentionLayer(nn.Module):
""" graph attention layer """
state_dict = torch.load('GAT.pt', map_location='cpu') # just use initial weight
net.load_state_dict(state_dict)
net.eval()
print("Loaded GAT model")
input_names = ["input_0", "input_1"]
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    if not onnx_path.exists():
        dummy_input = features
        torch.onnx.export(
            net,
            (dummy_input, adj),
            onnx_path,
            input_names=input_names,
            dynamic_axes={'input_0': [0]}
        )
        print(f"ONNX model exported to {onnx_path}.")
    else:
        print(f"ONNX model {onnx_path} already exists.")

if not ir_path.exists():
    print("Exporting ONNX model to IR... This may take a few minutes.")
    ov_model = ov.convert_model(onnx_path, input=[[2708, 1433], [2708, 2708]])
    ov.save_model(ov_model, ir_path)
else:
    print(f"IR model {ir_path} already exists.")
Traceback (most recent call last):
File "/home/u106118/openvino/GNN/GAT_openvino.py", line 90, in <module>
result = compiled_model({input_layer_1.any_name: features, input_layer_2.any_name:adj})[output_layer]
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 384, in __call__
return self._infer_request.infer(
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 143, in infer
return OVDict(super().infer(_data_dispatch(
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 354, in _data_dispatch
return create_shared(inputs, request) if is_shared else create_copied(inputs, request)
File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in _
return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in <dictcomp>
return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 82, in _
return Tensor(value, shared_memory=is_shared)
RuntimeError: Exception from src/bindings/python/src/pyopenvino/core/common.cpp:220:
SHARED MEMORY MODE FOR THIS TENSOR IS NOT APPLICABLE! Passed numpy array must be C contiguous.
Issue submission checklist
[X] I'm reporting an issue. It's not a question.
[X] I checked the problem with the documentation, FAQ, open issues, Stack Overflow, etc., and have not found a solution.
[X] There is reproducer code and related data files such as images, videos, models, etc.
OpenVINO Version
2023.1.0-12185-9e6b00e51cd-releases/2023/1
Operating System
Ubuntu 20.04 (LTS)
Device used for inference
CPU
Framework
PyTorch
Model used
Graph Attention Network
Issue description
Errors happen when graph neural networks run with two inputs
Error:

Traceback (most recent call last):
  File "/home/u106118/openvino/GNN/GAT_openvino.py", line 90, in <module>
    result = compiled_model({input_layer_1.any_name: features, input_layer_2.any_name:adj})[output_layer]
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 384, in __call__
    return self._infer_request.infer(
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/ie_api.py", line 143, in infer
    return OVDict(super().infer(_data_dispatch(
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 354, in _data_dispatch
    return create_shared(inputs, request) if is_shared else create_copied(inputs, request)
  File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
    return dispatch(args[0].__class__)(*args, **kw)
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in _
    return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 182, in <dictcomp>
    return {k: value_to_tensor(v, request=request, is_shared=True, key=k) for k, v in request._inputs_data.items()}
  File "/home/common/miniconda3/envs/openvino/lib/python3.10/functools.py", line 889, in wrapper
    return dispatch(args[0].__class__)(*args, **kw)
  File "/home/u106118/.local/lib/python3.10/site-packages/openvino/runtime/utils/data_helpers/data_dispatcher.py", line 82, in _
    return Tensor(value, shared_memory=is_shared)
RuntimeError: Exception from src/bindings/python/src/pyopenvino/core/common.cpp:220:
SHARED MEMORY MODE FOR THIS TENSOR IS NOT APPLICABLE! Passed numpy array must be C contiguous.
Step-by-step reproduction
import torch
import torch.nn as nn
import torch.nn.functional as F
import scipy.sparse as sp
import openvino as ov
from pathlib import Path
import warnings

class GraphAttentionLayer(nn.Module):
    """ graph attention layer """

class GAT(nn.Module):
    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """Dense version of GAT."""
        super(GAT, self).__init__()
        self.dropout = dropout

def encode_onehot(labels):
    classes = set(labels)
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
    return labels_onehot

def load_data(path=r"./data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)"""
    print("Loading {} dataset...".format(dataset))

def normalize_adj(mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))
    r_inv_sqrt = np.power(rowsum, -0.5).flatten()
    r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.0
    r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
    return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt)

def normalize_features(mx):
    """Row-normalize sparse matrix"""
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.0
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx

adj, features, labels, idx_train, idx_val, idx_test = load_data()
net = GAT(
    nfeat=features.shape[1],
    nhid=8,
    nclass=int(labels.max()) + 1,
    dropout=0.6,
    nheads=8,
    alpha=0.2,
)

print(net)
onnx_path = Path('GAT.onnx')
ir_path = onnx_path.with_suffix(".xml")

state_dict = torch.load('GAT.pt', map_location='cpu')  # just use initial weight
net.load_state_dict(state_dict)
net.eval()
print("Loaded GAT model")

input_names = ["input_0", "input_1"]
with warnings.catch_warnings():
    warnings.filterwarnings("ignore")
    if not onnx_path.exists():
        dummy_input = features
        torch.onnx.export(
            net,
            (dummy_input, adj),
            onnx_path,
            input_names=input_names,
            dynamic_axes={'input_0': [0]}
        )

if not ir_path.exists():
    print("Exporting ONNX model to IR... This may take a few minutes.")
    ov_model = ov.convert_model(onnx_path, input=[[2708, 1433], [2708, 2708]])
    ov.save_model(ov_model, ir_path)
else:
    print(f"IR model {ir_path} already exists.")

core = ov.Core()
model_xml = "GAT.xml"
model = core.read_model(model=model_xml)
compiled_model = core.compile_model(model=model, device_name="CPU")
input_layer_1 = compiled_model.input(0)
input_layer_2 = compiled_model.input(1)
output_layer = compiled_model.input(0)
print(input_layer_1)
print(input_layer_2)
print(output_layer)
result = compiled_model({input_layer_1.any_name: features, input_layer_2.any_name:adj})[output_layer]
Relevant log output
Issue submission checklist