facebookresearch / InferSent

InferSent sentence embeddings

Getting error with: Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #4 'mat1' #78

Closed fabrahman closed 6 years ago

fabrahman commented 6 years ago

Hi, I am getting the following error, which I believe has something to do with mixing the CUDA and CPU tensor types. Has anyone faced the same problem? I would appreciate any help.

Traceback (most recent call last):
  File "infersent.py", line 76, in <module>
    results = se.eval(transfer_tasks)
  File "../senteval/engine.py", line 59, in eval
    self.results = {x: self.eval(x) for x in name}
  File "../senteval/engine.py", line 59, in <dictcomp>
    self.results = {x: self.eval(x) for x in name}
  File "../senteval/engine.py", line 121, in eval
    self.results = self.evaluation.run(self.params, self.batcher)
  File "../senteval/mrpc.py", line 74, in run
    embeddings = batcher(params, batch)
  File "infersent.py", line 43, in batcher
    embeddings = params.infersent.encode(sentences, bsize=params.batch_size, tokenize=False)
  File "/tilde/fbrahman/parc/SentEval/examples/models.py", line 226, in encode
    (batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
  File "/tilde/fbrahman/parc/SentEval/examples/models.py", line 68, in forward
    sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
    result = self.forward(*input, **kwargs)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 192, in forward
    output, hidden = func(input, self.all_weights, hx, batch_sizes)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 324, in forward
    return func(input, *fargs, **fkwargs)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 244, in forward
    nexth, output = func(input, hidden, weight, batch_sizes)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 87, in forward
    hy, output = inner(input, hidden[l], weight[l], batch_sizes)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 159, in forward
    hidden = inner(step_input, hidden, *weight)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 34, in LSTMCell
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
  File "/tilde/fbrahman/anaconda3/envs/py36/lib/python3.6/site-packages/torch/nn/functional.py", line 1024, in linear
    return torch.addmm(bias, input, weight.t())
RuntimeError: Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #4 'mat1'

However, this error goes away when using PyTorch 0.3.1, but then a new error comes up:

AttributeError: 'torch.cuda.FloatTensor' object has no attribute 'item'
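
Both errors point at version/device mismatches rather than anything InferSent-specific. The RuntimeError means the LSTM weights are on the GPU while one of the tensors fed into them is still a CPU tensor. A minimal sketch of the generic PyTorch workaround, assuming infersent is the already-loaded encoder module (this is not necessarily the fix that was later committed to the repo):

    import torch

    # Keep the encoder and everything it builds internally on a single device.
    if torch.cuda.is_available():
        infersent = infersent.cuda()   # run the whole module on the GPU ...
    else:
        infersent = infersent.cpu()    # ... or keep everything on the CPU

    # Sanity check: do the encoder's parameters sit on the device you expect?
    print(next(infersent.parameters()).is_cuda)

As for the AttributeError on PyTorch 0.3.1: Tensor.item() only exists from PyTorch 0.4 onwards, so on 0.3.x the usual equivalent is indexing the one-element tensor (e.g. x[0]) instead of calling x.item().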
aconneau commented 6 years ago

Hi,

sorry for the late reply. Could you try again after pulling the latest commit, b30364377422dbb1c41236ef150f1d37d2dc3e5f? I believe the error may come from there.

Thank you, Alexis

aconneau commented 6 years ago

Closing as there is no activity on this thread. Please feel free to re-open if you still face the same issue. Best, Alexis

rohankumar0002 commented 5 years ago

Nb words kept : 418/421 (99.29 %)

RuntimeError                              Traceback (most recent call last)

in ()
----> 1 embeddings = model.encode(sentences[0], bsize=128, tokenize=True, verbose=True)

/content/SQuAD/models.py in encode(self, sentences, bsize, tokenize, verbose)
    210             batch = batch.cuda()
    211         batch = self.forward(
--> 212             (batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
    213         embeddings.append(batch)
    214     embeddings = np.vstack(embeddings)

/content/SQuAD/models.py in forward(self, sent_tuple)
     53         # Handling padding in Recurrent Networks
     54         sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len)
---> 55         sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
     56         sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
     57

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
    475             result = self._slow_forward(*input, **kwargs)
    476         else:
--> 477             result = self.forward(*input, **kwargs)
    478         for hook in self._forward_hooks.values():
    479             hook_result = hook(self, input, result)

/usr/local/lib/python3.6/dist-packages/torch/nn/modules/rnn.py in forward(self, input, hx)
    190             flat_weight=flat_weight
    191         )
--> 192         output, hidden = func(input, self.all_weights, hx, batch_sizes)
    193         if is_packed:
    194             output = PackedSequence(output, batch_sizes)

/usr/local/lib/python3.6/dist-packages/torch/nn/_functions/rnn.py in forward(input, *fargs, **fkwargs)
    322             func = decorator(func)
    323
--> 324         return func(input, *fargs, **fkwargs)
    325
    326     return forward

/usr/local/lib/python3.6/dist-packages/torch/nn/_functions/rnn.py in forward(input, weight, hidden, batch_sizes)
    242         input = input.transpose(0, 1)
    243
--> 244     nexth, output = func(input, hidden, weight, batch_sizes)
    245
    246     if batch_first and not variable_length:

/usr/local/lib/python3.6/dist-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight, batch_sizes)
     85             l = i * num_directions + j
     86
---> 87             hy, output = inner(input, hidden[l], weight[l], batch_sizes)
     88             next_hidden.append(hy)
     89             all_output.append(output)

/usr/local/lib/python3.6/dist-packages/torch/nn/_functions/rnn.py in forward(input, hidden, weight, batch_sizes)
    157             hidden = (inner(step_input, hidden[0], *weight),)
    158         else:
--> 159             hidden = inner(step_input, hidden, *weight)
    160
    161         output.append(hidden[0])

/usr/local/lib/python3.6/dist-packages/torch/nn/_functions/rnn.py in LSTMCell(input, hidden, w_ih, w_hh, b_ih, b_hh)
     32
     33     hx, cx = hidden
---> 34     gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
     35
     36     ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py in linear(input, weight, bias)
   1022     if input.dim() == 2 and bias is not None:
   1023         # fused op is marginally faster
-> 1024         return torch.addmm(bias, input, weight.t())
   1025
   1026     output = input.matmul(weight.t())

RuntimeError: Expected object of type torch.cuda.FloatTensor but found type torch.FloatTensor for argument #4 'mat1'
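
This is the same CPU/GPU mismatch as in the original report: the LSTM weights inside the encoder are CUDA tensors while a tensor reaching them in forward() is still on the CPU. A rough sketch of a wrapper that forces everything onto one device before encoding, assuming model is the loaded InferSent encoder; the function name and the use_gpu flag are illustrative, not part of the repo's API:

    import torch

    def encode_on_one_device(model, sentences, use_gpu=True):
        # Move the whole encoder (embedding lookups, LSTM weights) onto a
        # single device first, so the batches built inside encode() match it.
        if use_gpu and torch.cuda.is_available():
            model = model.cuda()
        else:
            model = model.cpu()
        # Same call as in the traceback above.
        return model.encode(sentences, bsize=128, tokenize=True, verbose=True)

    # Example, mirroring the snippet that raised the error:
    # embeddings = encode_on_one_device(model, sentences, use_gpu=True)

If the error persists after that, upgrading to a current PyTorch release together with the latest master of this repository (see the commit referenced above) is the other thing worth trying.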