Open xiongjun926g opened 1 month ago
!pip install -q pyarrow==14.0.1 datasets==2.10.0
Thanks for the help, but the problem stays the same:
!pip install -q pyarrow==14.0.1 datasets==2.10.0
(download progress bars omitted; all packages installed successfully)
!pip install fastcoref -q
Preparing metadata (setup.py) ... done
Building wheel for fastcoref (setup.py) ... done
from fastcoref import FCoref
model = FCoref(device='cpu')
UserWarning: `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to access public models or datasets.
  warnings.warn(
config.json: 100% 819/819 [00:00<00:00, 14.2kB/s]
tokenizer_config.json: 100% 393/393 [00:00<00:00, 4.15kB/s]
vocab.json: 100% 798k/798k [00:00<00:00, 2.67MB/s]
merges.txt: 100% 456k/456k [00:00<00:00, 3.61MB/s]
tokenizer.json: 100% 1.36M/1.36M [00:00<00:00, 3.82MB/s]
special_tokens_map.json: 100% 239/239 [00:00<00:00, 4.80kB/s]
/usr/local/lib/python3.10/dist-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be deprecated in transformers v4.45, and will then be set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
  warnings.warn(
pytorch_model.bin: 100% 362M/362M [00:04<00:00, 113MB/s]
ValueError                                Traceback (most recent call last)
...problem stays the same:
No, it's not the same :) The new error means the transformers library was updated on Google Colab again, so just install an older version supported by fastcoref. I found, for example, that transformers==4.31.0 works fine.
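For example, a pinned install cell in Colab might look like the sketch below (transformers==4.31.0 comes from the comment above; the verification step is an assumption, and Colab needs a runtime restart before the new version is importable):

!pip install -q transformers==4.31.0 fastcoref

import transformers
print(transformers.__version__)  # should print 4.31.0 after the restart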
Hey. You can use this to work around the error you've seen, @xiongjun926g. It seems to load and do inference fine on transformers 4.45.1:
from fastcoref import LingMessCoref as OriginalLingMessCoref
from fastcoref import FCoref as OriginalFCoref
from transformers import AutoModel
import functools

class PatchedLingMessCoref(OriginalLingMessCoref):
    def __init__(self, *args, **kwargs):
        # Temporarily swap AutoModel.from_config for a version that forces
        # attn_implementation='eager', then restore the original afterwards.
        original_from_config = AutoModel.from_config
        def patched_from_config(config, *args, **kwargs):
            kwargs['attn_implementation'] = 'eager'
            return original_from_config(config, *args, **kwargs)
        try:
            AutoModel.from_config = functools.partial(patched_from_config, attn_implementation='eager')
            super().__init__(*args, **kwargs)
        finally:
            AutoModel.from_config = original_from_config

class PatchedFCoref(OriginalFCoref):
    def __init__(self, *args, **kwargs):
        # Same patch as above, applied while FCoref builds its model.
        original_from_config = AutoModel.from_config
        def patched_from_config(config, *args, **kwargs):
            kwargs['attn_implementation'] = 'eager'
            return original_from_config(config, *args, **kwargs)
        try:
            AutoModel.from_config = functools.partial(patched_from_config, attn_implementation='eager')
            super().__init__(*args, **kwargs)
        finally:
            AutoModel.from_config = original_from_config

model1 = PatchedLingMessCoref(
    nlp="en_core_web_lg",
    device="cpu"
)

model2 = PatchedFCoref(
    nlp="en_core_web_lg",
    device="cpu"
)

# Run your stuff here
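As a quick sanity check that the patched models actually run, a minimal inference call could look like this (a sketch based on the predict/get_clusters API from the fastcoref README; the example sentence and expected output are made up):

preds = model2.predict(
    texts=["Alice told Bob that she would meet him at the station."]
)
print(preds[0].get_clusters())  # e.g. [['Alice', 'she'], ['Bob', 'him']]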
This works for me, and the error output actually already states how to fix this problem: load your model with the argument attn_implementation="eager" in the meantime.
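For reference, here is what that argument looks like in plain transformers (a minimal sketch; "biu-nlp/f-coref" is assumed to be the checkpoint FCoref downloads, and fastcoref's own constructor doesn't appear to expose the parameter, which is why the monkey-patch above is needed):

from transformers import AutoModel

# Load the backbone directly with the setting the error message suggests.
model = AutoModel.from_pretrained("biu-nlp/f-coref", attn_implementation="eager")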
@shon-otmazgin Can you please implement that fix? It's an easy one. Thanks.
It had been working fantastically in Colab until last week.
!pip install fastcoref==2.1.5 -q
!pip install fastcoref==1.6.0 -q
(download progress bars omitted)
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
cudf-cu12 24.4.1 requires pyarrow<15.0.0a0,>=14.0.1, but you have pyarrow 17.0.0 which is incompatible.
ibis-framework 8.0.0 requires pyarrow<16,>=2, but you have pyarrow 17.0.0 which is incompatible.
from fastcoref import FCoref
# model = FCoref(device='cuda:0')
model = FCoref(device='cpu')
AttributeError                            Traceback (most recent call last)
<ipython-input> in <cell line: 1>()
----> 1 from fastcoref import FCoref
      2 # model = FCoref(device='cuda:0')
      3 model = FCoref(device='cpu')

9 frames
/usr/local/lib/python3.10/dist-packages/pyarrow/_compute.pyx in init pyarrow._compute()

AttributeError: module 'pyarrow.lib' has no attribute 'ListViewType'
import pyarrow
pyarrow.__version__
14.0.2
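This error usually means the pyarrow module Python already imported doesn't match what pip just installed; Colab keeps the old module loaded until you restart the runtime. A small diagnostic sketch (nothing fastcoref-specific, just comparing the two views):

import importlib.metadata as md
import pyarrow

# Compare the module actually imported against what pip installed;
# a mismatch means the runtime still needs a restart.
print("imported: ", pyarrow.__version__)
print("installed:", md.version("pyarrow"))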