Bug description
When trying to run inference using EsmForProteinFolding from the HuggingFace transformers library on Windows, an exception is raised.
Reproduction steps
Code:
from transformers import AutoTokenizer, EsmForProteinFolding
tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
inputs = tokenizer("MLKNVQVQLV", return_tensors="pt", add_special_tokens=False) # A tiny random peptide
outputs = model(**inputs)
Logs
Traceback (most recent call last):
File "C:\Users\yairs\OneDrive\Documents\University\Master\stuff\python packages tutorials\esm\EsmForProteinFolding.py", line 8, in <module>
outputs = model(**inputs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 2156, in forward
structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1965, in forward
structure = self.structure_module(
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1799, in forward
pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1880, in frames_and_literature_positions_to_atom14_pos
return frames_and_literature_positions_to_atom14_pos(
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\openfold_utils\feats.py", line 236, in frames_and_literature_positions_to_atom14_pos
group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
RuntimeError: one_hot is only applicable to index tensor.
Bug description: When trying to run inference using EsmForProteinFolding from the HuggingFace transformers library on Windows, an exception is raised.
Reproduction steps Code: from transformers import AutoTokenizer, EsmForProteinFolding
tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1") model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
inputs = tokenizer("MLKNVQVQLV", return_tensors="pt", add_special_tokens=False) # A tiny random peptide
outputs = model(**inputs)
Logs Traceback (most recent call last): File "C:\Users\yairs\OneDrive\Documents\University\Master\stuff\python packages tutorials\esm\EsmForProteinFolding.py", line 8, in <module>
outputs = model(**inputs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 2156, in forward
structure: dict = self.trunk(s_s_0, s_z_0, aa, position_ids, attention_mask, no_recycles=num_recycles)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1965, in forward
structure = self.structure_module(
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1799, in forward
pred_xyz = self.frames_and_literature_positions_to_atom14_pos(all_frames_to_global, aatype)
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\modeling_esmfold.py", line 1880, in frames_and_literature_positions_to_atom14_pos
return frames_and_literature_positions_to_atom14_pos(
File "C:\Users\yairs\miniconda3\lib\site-packages\transformers\models\esm\openfold_utils\feats.py", line 236, in frames_and_literature_positions_to_atom14_pos
group_mask_one_hot: torch.LongTensor = nn.functional.one_hot(
RuntimeError: one_hot is only applicable to index tensor.