MDK8888 / GPTFast

Accelerate your Hugging Face Transformers 7.6-9x. Native to Hugging Face and PyTorch.
Apache License 2.0

Doesn't work on Kaggle notebooks - ValueError: Unable to compare versions for numpy>=1.17: need=1.17 found=None. This is unusual. Consider reinstalling numpy. #23

Open c1505 opened 7 months ago

c1505 commented 7 months ago

# To reproduce
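Roughly, the call that hits the second failure below, reconstructed from the traceback (`Cell In[10]`); the model names, the sampling function, and everything in `cache_config` beyond what the traceback shows are placeholders, not the exact notebook contents:

```python
import torch
from GPTFast.Core.GPTFast import gpt_fast  # module path taken from the traceback

# Placeholders: the real notebook values are only partially visible in the traceback.
model_name = "gpt2"
draft_model_name = "distilgpt2"

def argmax(self, probabilities):
    # Placeholder sampling function; the actual one isn't shown in the traceback.
    return torch.argmax(probabilities, dim=-1)

cache_config = {
    "model_config": {
        "path_to_blocks": ["transformer", "h"],
        # ... remaining keys elided in the traceback ...
    },
    "imports": ["import torch", "from torch import nn"],
}

gpt_fast_model = gpt_fast(
    model_name,
    sample_function=argmax,
    max_length=60,
    cache_config=cache_config,
    draft_model_name=draft_model_name,
)
```

The first error appears at `import transformers` time; the second appears inside the `gpt_fast(...)` call, when a lazy transformers import pulls in torchaudio.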

# Environment

# Additional details/thoughts

```
File /opt/conda/lib/python3.10/site-packages/transformers/__init__.py:26
     23 from typing import TYPE_CHECKING
     25 # Check the dependencies satisfy the minimal versions required.
---> 26 from . import dependency_versions_check
     27 from .utils import (
     28     OptionalDependencyNotAvailable,
     29     _LazyModule,
   (...)
     48     logging,
     49 )
     52 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

File /opt/conda/lib/python3.10/site-packages/transformers/dependency_versions_check.py:57
     54             if not is_accelerate_available():
     55                 continue  # not required, check version only if installed
---> 57         require_version_core(deps[pkg])
     58     else:
     59         raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")

File /opt/conda/lib/python3.10/site-packages/transformers/utils/versions.py:117, in require_version_core(requirement)
    115 """require_version wrapper which emits a core-specific hint on failure"""
    116 hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
--> 117 return require_version(requirement, hint)

File /opt/conda/lib/python3.10/site-packages/transformers/utils/versions.py:111, in require_version(requirement, hint)
    109 if want_ver is not None:
    110     for op, want_ver in wanted.items():
--> 111         _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)

File /opt/conda/lib/python3.10/site-packages/transformers/utils/versions.py:39, in _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
     37 def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
     38     if got_ver is None or want_ver is None:
---> 39         raise ValueError(
     40             f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
     41             f" reinstalling {pkg}."
     42         )
     43     if not ops[op](version.parse(got_ver), version.parse(want_ver)):
     44         raise ImportError(
     45             f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
     46         )

ValueError: Unable to compare versions for numpy>=1.17: need=1.17 found=None. This is unusual. Consider reinstalling numpy.
```
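For context on `found=None`: recent `transformers` releases resolve each pinned dependency's installed version with `importlib.metadata`, so this error means the numpy distribution metadata is missing or broken (the import itself may still work). On Kaggle this seems to happen when an in-notebook `pip install` partially overwrites the preinstalled copy, though that last part is a guess. A quick check of what the version gate actually sees:

```python
import importlib.metadata

# What transformers' version check sees for each package.
for pkg in ("numpy", "torch", "torchaudio", "transformers"):
    try:
        # None here usually means a dist-info directory exists but its METADATA is missing/empty.
        print(pkg, importlib.metadata.version(pkg))
    except importlib.metadata.PackageNotFoundError:
        print(pkg, "no distribution metadata found")
```

If numpy prints `None` there, force-reinstalling it (`pip install --force-reinstall numpy`) and restarting the kernel should restore the metadata, matching the "Consider reinstalling numpy" hint in the error.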


# The other error (torchaudio import failure)

```
OSError                                   Traceback (most recent call last)
File /opt/conda/lib/python3.10/site-packages/transformers/utils/import_utils.py:1472, in _LazyModule._get_module(self, module_name)
   1471 try:
-> 1472     return importlib.import_module("." + module_name, self.__name__)
   1473 except Exception as e:

File /opt/conda/lib/python3.10/importlib/__init__.py:126, in import_module(name, package)
    125         level += 1
--> 126 return _bootstrap._gcd_import(name[level:], package, level)

File <frozen importlib._bootstrap>:1050, in _gcd_import(name, package, level)

File <frozen importlib._bootstrap>:1027, in _find_and_load(name, import_)

File <frozen importlib._bootstrap>:1006, in _find_and_load_unlocked(name, import_)

File <frozen importlib._bootstrap>:688, in _load_unlocked(spec)

File <frozen importlib._bootstrap_external>:883, in exec_module(self, module)

File <frozen importlib._bootstrap>:241, in _call_with_frames_removed(f, *args, **kwds)

File /opt/conda/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py:30
     29 if is_speech_available():
---> 30     import torchaudio.compliance.kaldi as ta_kaldi
     32 if is_torch_available():

File /opt/conda/lib/python3.10/site-packages/torchaudio/__init__.py:1
----> 1 from . import (  # noqa: F401
      2     _extension,
      3     compliance,
      4     datasets,
      5     functional,
      6     io,
      7     kaldi_io,
      8     models,
      9     pipelines,
     10     sox_effects,
     11     transforms,
     12     utils,
     13 )
     14 from ._backend.common import AudioMetaData  # noqa

File /opt/conda/lib/python3.10/site-packages/torchaudio/_extension/__init__.py:45
     44 if _IS_TORCHAUDIO_EXT_AVAILABLE:
---> 45     _load_lib("libtorchaudio")
     47     import torchaudio.lib._torchaudio  # noqa

File /opt/conda/lib/python3.10/site-packages/torchaudio/_extension/utils.py:64, in _load_lib(lib)
     63     return False
---> 64 torch.ops.load_library(path)
     65 torch.classes.load_library(path)

File /opt/conda/lib/python3.10/site-packages/torch/_ops.py:933, in _Ops.load_library(self, path)
    929 with dl_open_guard():
    930     # Import the shared library into the process, thus running its
    931     # static (global) initialization code in order to register custom
    932     # operators with the JIT.
--> 933     ctypes.CDLL(path)
    934 self.loaded_libraries.add(path)

File /opt/conda/lib/python3.10/ctypes/__init__.py:374, in CDLL.__init__(self, name, mode, handle, use_errno, use_last_error, winmode)
    373 if handle is None:
--> 374     self._handle = _dlopen(self._name, mode)
    375 else:

OSError: /opt/conda/lib/python3.10/site-packages/torchaudio/lib/libtorchaudio.so: undefined symbol: _ZNK5torch8autograd4Node4nameB5cxx11Ev

The above exception was the direct cause of the following exception:

RuntimeError                              Traceback (most recent call last)
Cell In[10], line 77
     44 MAX_TOKENS=50
     46 cache_config = {
     47     "model_config": {
     48         "path_to_blocks": ["transformer", "h"],
   (...)
     74     "from torch import nn"]
     75 }
---> 77 gpt_fast_model = gpt_fast(model_name, sample_function=argmax, max_length=60, cache_config=cache_config, draft_model_name=draft_model_name)
     78 gpt_fast_model.to(device)
     80 fast_compile_times = []

File /opt/conda/lib/python3.10/site-packages/GPTFast/Core/GPTFast.py:11, in gpt_fast(model_name, sample_function, max_length, cache_config, **spec_dec_kwargs)
      9 def gpt_fast(model_name:str, sample_function:Callable, max_length:int, cache_config:dict, **spec_dec_kwargs):
     10     model = load_int8(model_name)
---> 11     model = add_kv_cache(model, sample_function, max_length, cache_config, dtype=torch.float16)
     12     spec_decode = False
     13     if spec_dec_kwargs:

File /opt/conda/lib/python3.10/site-packages/GPTFast/Core/KVCache/KVCacheModel.py:208, in add_kv_cache(transformer, sampling_fn, max_length, cache_config, dtype)
    207 def add_kv_cache(transformer:nn.Module, sampling_fn:Callable, max_length:int, cache_config:dict, dtype) -> KVCacheModel:
--> 208     model = KVCacheModel(transformer, sampling_fn, max_length, cache_config, dtype)
    209     return model

File /opt/conda/lib/python3.10/site-packages/GPTFast/Core/KVCache/KVCacheModel.py:21, in KVCacheModel.__init__(self, model, sample_fn, max_length, cache_config, dtype)
     17 self.sample = types.MethodType(sample_fn, self)
     19 assert not isinstance(model, BloomForCausalLM), "Bloom models currently have an unsupported kv cache shape."
---> 21 self._model = self.add_static_cache_to_model(model, cache_config, max_length, dtype, self.device)
     22 config = self._model.config
     23 self._max_length = max_length

File /opt/conda/lib/python3.10/site-packages/GPTFast/Core/KVCache/KVCacheModel.py:50, in KVCacheModel.add_static_cache_to_model(cls, model, cache_config, max_generated_length, dtype, device)
     48 module_forward_str_kv_cache = add_input_pos_to_func_str(module_forward_str, forward_prop_ref, "input_pos=input_pos")
     49 module_forward_str_kv_cache = add_default_parameter(module_forward_str_kv_cache, "forward", "input_pos", "Optional[torch.Tensor]", None, True)
---> 50 add_str_as_func(module_with_input_pos, "forward", module_forward_str_kv_cache, imports)
     52 module_with_input_pos = getattr(module_with_input_pos, prop)
     54 assert isinstance(module_with_input_pos, nn.ModuleList), "Once we finish iterating through 'path_to_blocks', the property that you arrive at must be a nn.ModuleList."

File /opt/conda/lib/python3.10/site-packages/GPTFast/Helpers/Class/add_str_as_func.py:11, in add_str_as_func(obj, method_name, func_str, imports)
      9 func_code = compile(complete_func_str, "<string>", "exec")
     10 namespace = {}
---> 11 exec(func_code, namespace)
     13 # Extract the func from the namespace
     14 my_func = namespace[method_name]

File <string>:3

File <frozen importlib._bootstrap>:1073, in _handle_fromlist(module, fromlist, import_, recursive)

File <frozen importlib._bootstrap>:1075, in _handle_fromlist(module, fromlist, import_, recursive)

File /opt/conda/lib/python3.10/site-packages/transformers/utils/import_utils.py:1463, in _LazyModule.__getattr__(self, name)
   1461 elif name in self._class_to_module.keys():
   1462     module = self._get_module(self._class_to_module[name])
-> 1463     value = getattr(module, name)
   1464 else:
   1465     raise AttributeError(f"module {self.__name__} has no attribute {name}")

File /opt/conda/lib/python3.10/site-packages/transformers/utils/import_utils.py:1462, in _LazyModule.__getattr__(self, name)
   1460     value = self._get_module(name)
   1461 elif name in self._class_to_module.keys():
-> 1462     module = self._get_module(self._class_to_module[name])
   1463     value = getattr(module, name)
   1464 else:

File /opt/conda/lib/python3.10/site-packages/transformers/utils/import_utils.py:1474, in _LazyModule._get_module(self, module_name)
   1472     return importlib.import_module("." + module_name, self.__name__)
   1473 except Exception as e:
-> 1474     raise RuntimeError(
   1475         f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
   1476         f" traceback):\n{e}"
   1477     ) from e

RuntimeError: Failed to import transformers.models.audio_spectrogram_transformer.feature_extraction_audio_spectrogram_transformer because of the following error (look up to see its traceback): /opt/conda/lib/python3.10/site-packages/torchaudio/lib/libtorchaudio.so: undefined symbol: _ZNK5torch8autograd4Node4nameB5cxx11Ev
```
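For what it's worth, the undefined symbol (`_ZNK5torch8autograd4Node4nameB5cxx11Ev`, which demangles to `torch::autograd::Node::name[abi:cxx11]() const`) usually means the installed torchaudio wheel was built against a different torch than the one currently in the environment, i.e. a version/ABI mismatch rather than a GPTFast bug; that is an inference from the symbol, not something the traceback states. A quick way to see the pairing without importing torchaudio (which is exactly what fails):

```python
import importlib.metadata

# torchaudio wheels are built against one specific torch release; if these two
# disagree (e.g. torch 2.2.x alongside a torchaudio built for 2.1.x), loading
# libtorchaudio.so fails with this kind of undefined-symbol error.
for pkg in ("torch", "torchaudio"):
    try:
        print(pkg, importlib.metadata.version(pkg))
    except importlib.metadata.PackageNotFoundError:
        print(pkg, "not installed")
```

If the versions don't line up, installing the torchaudio build published alongside the installed torch (or simply uninstalling torchaudio, since transformers only imports it lazily for audio models) should let the lazy import in `feature_extraction_audio_spectrogram_transformer.py` succeed or be skipped.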