RuntimeError: The size of tensor a (84900) must match the size of tensor b (7126) at non-singleton dimension 0

I am receiving the error below while training a GCN that uses a custom GCNConv message-passing layer (PyTorch Geometric, Python 3.12):
---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[87], line 21
     19 model.train()
     20 optimizer.zero_grad()
---> 21 log_logits = model(data.x, data.edge_index)
     22 loss = F.nll_loss(log_logits, data.y)
     23 loss.backward()

File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1562, in Module._call_impl(self, *args, **kwargs)
   1557 # If we don't have any hooks, we want to skip the rest of the logic in
   1558 # this function, and just call forward.
   1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1560         or _global_backward_pre_hooks or _global_backward_hooks
   1561         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1562     return forward_call(*args, **kwargs)
   1564 try:
   1565     result = None

Cell In[87], line 8, in GCN.forward(self, x, edge_index)
      7 def forward(self, x, edge_index):
----> 8     h = self.conv1(x, edge_index).relu()
      9     h = F.dropout(h, p=0.5, training=self.training)
     10     h = self.conv2(h, edge_index)

File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1562, in Module._call_impl(self, *args, **kwargs)
   1557 # If we don't have any hooks, we want to skip the rest of the logic in
   1558 # this function, and just call forward.
   1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
   1560         or _global_backward_pre_hooks or _global_backward_hooks
   1561         or _global_forward_hooks or _global_forward_pre_hooks):
-> 1562     return forward_call(*args, **kwargs)
   1564 try:
   1565     result = None

Cell In[11], line 20, in GCNConv.forward(self, x, edge_index)
     17 deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
     18 norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
---> 20 out = self.propagate(edge_index, x=x, norm=norm)
     22 return out

File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch_geometric/nn/conv/message_passing.py:523, in MessagePassing.propagate(self, edge_index, size, **kwargs)
    521 if res is not None:
    522     msg_kwargs = res[0] if isinstance(res, tuple) else res
--> 523 out = self.message(**msg_kwargs)
    524 for hook in self._message_forward_hooks.values():
    525     res = hook(self, (msg_kwargs, ), out)

Cell In[11], line 25, in GCNConv.message(self, x, norm)
     24 def message(self, x, norm):
---> 25     return norm.view(-1, 1) * x

RuntimeError: The size of tensor a (84900) must match the size of tensor b (7126) at non-singleton dimension 0
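For context, the custom layer appears to follow the GCN example from the PyTorch Geometric message-passing tutorial. Here is a minimal reconstruction of it: the lines that appear in the traceback (Cell In[11]) are verbatim, while the imports, __init__, and the start of forward are filled in from that tutorial and may differ slightly from my notebook:

import torch
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import add_self_loops, degree

class GCNConv(MessagePassing):
    def __init__(self, in_channels, out_channels):
        super().__init__(aggr='add')  # sum-aggregate incoming messages
        self.lin = torch.nn.Linear(in_channels, out_channels, bias=False)

    def forward(self, x, edge_index):
        # x: [num_nodes, in_channels], edge_index: [2, num_edges]
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
        x = self.lin(x)

        # Symmetric normalization D^{-1/2} A D^{-1/2}: one coefficient per edge.
        row, col = edge_index
        deg = degree(col, x.size(0), dtype=x.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]  # shape [num_edges]

        out = self.propagate(edge_index, x=x, norm=norm)

        return out

    def message(self, x, norm):
        return norm.view(-1, 1) * x  # <- line 25, where the RuntimeError is raised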
From the sizes, 7126 looks like the number of nodes in my graph and 84900 the number of edges after add_self_loops, so norm has one entry per edge while the x that reaches message() is still the full [num_nodes, channels] feature matrix. Why isn't x being mapped to the edge level inside propagate(), and what is the correct way to write message()?
{ "name": "RuntimeError", "message": "The size of tensor a (84900) must match the size of tensor b (7126) at non-singleton dimension 0", "stack": "--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) Cell In[87], line 21 19 model.train() 20 optimizer.zero_grad() ---> 21 log_logits = model(data.x, data.edge_index)
22 loss = F.nll_loss(log_logits, data.y) 23 loss.backward()
File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1553, in Module._wrapped_call_impl(self, *args, kwargs) 1551 return self._compiled_call_impl(*args, *kwargs) # type: ignore[misc] 1552 else: -> 1553 return self._call_impl(args, kwargs)
File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1562, in Module._call_impl(self, *args, *kwargs) 1557 # If we don't have any hooks, we want to skip the rest of the logic in 1558 # this function, and just call forward. 1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks 1560 or _global_backward_pre_hooks or _global_backward_hooks 1561 or _global_forward_hooks or _global_forward_pre_hooks): -> 1562 return forward_call(args, **kwargs) 1564 try: 1565 result = None
Cell In[87], line 8, in GCN.forward(self, x, edge_index) 7 def forward(self, x, edge_index): ----> 8 h = self.conv1(x, edge_index).relu() 9 h = F.dropout(h, p=0.5, training=self.training) 10 h = self.conv2(h, edge_index)
File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1553, in Module._wrapped_call_impl(self, *args, kwargs) 1551 return self._compiled_call_impl(*args, *kwargs) # type: ignore[misc] 1552 else: -> 1553 return self._call_impl(args, kwargs)
File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch/nn/modules/module.py:1562, in Module._call_impl(self, *args, *kwargs) 1557 # If we don't have any hooks, we want to skip the rest of the logic in 1558 # this function, and just call forward. 1559 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks 1560 or _global_backward_pre_hooks or _global_backward_hooks 1561 or _global_forward_hooks or _global_forward_pre_hooks): -> 1562 return forward_call(args, **kwargs) 1564 try: 1565 result = None
Cell In[11], line 20, in GCNConv.forward(self, x, edge_index) 17 deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0 18 norm = deg_inv_sqrt[row] * deg_inv_sqrt[col] ---> 20 out = self.propagate(edge_index, x=x, norm=norm) 22 return out
File ~/Desktop/Extras/Graph-Neural-Network/.venv/lib/python3.12/site-packages/torch_geometric/nn/conv/message_passing.py:523, in MessagePassing.propagate(self, edge_index, size, kwargs) 521 if res is not None: 522 msg_kwargs = res[0] if isinstance(res, tuple) else res --> 523 out = self.message(msg_kwargs) 524 for hook in self._message_forward_hooks.values(): 525 res = hook(self, (msg_kwargs, ), out)
Cell In[11], line 25, in GCNConv.message(self, x, norm) 24 def message(self, x, norm): ---> 25 return norm.view(-1, 1) * x
RuntimeError: The size of tensor a (84900) must match the size of tensor b (7126) at non-singleton dimension 0" }
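Update: rereading the MessagePassing documentation, I think the problem is the argument name. propagate() only lifts a tensor to the edge level when the corresponding message() parameter carries an _i or _j suffix (x_j being x indexed by the source node of each edge); a plain x is passed through unchanged as the [num_nodes, channels] matrix, which is why it cannot broadcast against the per-edge norm. If that reading is right, the fix would be a rename, sketched below (otherwise untested):

def message(self, x_j, norm):
    # x_j: source-node features gathered per edge, shape [num_edges, out_channels]
    # norm: one normalization coefficient per edge, shape [num_edges]
    return norm.view(-1, 1) * x_j

Can someone confirm that the _j suffix is what triggers the per-edge gathering here?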