huggingface / optimum-nvidia

ValueError: mutable default <class 'tensorrt_llm.lora_manager.LoraBuildConfig'> for field lora_config is not allowed: use default_factory #119

Open · manish-marwah opened this issue 7 months ago

manish-marwah commented 7 months ago

I get the following error when importing pipeline. Any ideas? Thanks.

from optimum.nvidia.pipelines import pipeline

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[10], line 1
----> 1 from optimum.nvidia.pipelines import pipeline

File ~/anaconda3/lib/python3.11/site-packages/optimum/nvidia/__init__.py:16
      1 #  coding=utf-8
      2 #  Copyright 2024 The HuggingFace Inc. team. All rights reserved.
      3 #  #
   (...)
     13 #  See the License for the specific language governing permissions and
     14 #  limitations under the License.
---> 16 from .config import TensorRTConfig
     17 from .logging import DEFAULT_LOGGING_FMT, setup_logging
     18 from .models import AutoModelForCausalLM

File ~/anaconda3/lib/python3.11/site-packages/optimum/nvidia/config.py:21
     18 from typing import Optional, Union
     20 import torch
---> 21 from tensorrt_llm import Mapping
     22 from tensorrt_llm.models import PretrainedConfig as TensorRTPretrainedConfig
     23 from tensorrt_llm.models.modeling_utils import (
     24     QuantizationConfig as TensorRTQuantizationConfig,
     25 )

File ~/anaconda3/lib/python3.11/site-packages/tensorrt_llm/__init__.py:35
     33 import tensorrt_llm.models as models
     34 import tensorrt_llm.quantization as quantization
---> 35 import tensorrt_llm.runtime as runtime
     36 import tensorrt_llm.tools as tools
     38 from ._common import _init, default_net, default_trtnet, precision

File ~/anaconda3/lib/python3.11/site-packages/tensorrt_llm/runtime/__init__.py:22
     16 from .generation import (ChatGLMGenerationSession, GenerationSession,
     17                          LogitsProcessor, LogitsProcessorList,
     18                          MambaLMHeadModelGenerationSession, ModelConfig,
     19                          QWenForCausalLMGenerationSession, StoppingCriteria,
     20                          StoppingCriteriaList, to_word_list_format)
     21 from .kv_cache_manager import GenerationSequence, KVCacheManager
---> 22 from .model_runner import ModelRunner
     23 from .session import Session, TensorInfo
     25 try:

File ~/anaconda3/lib/python3.11/site-packages/tensorrt_llm/runtime/model_runner.py:27
     25 from .. import profiler
     26 from .._utils import mpi_world_size
---> 27 from ..builder import Engine, get_engine_version
     28 from ..logger import logger
     29 from ..mapping import Mapping

File ~/anaconda3/lib/python3.11/site-packages/tensorrt_llm/builder.py:408
    404         to_json_file(config, config_path)
    405         logger.info(f'Config saved to {config_path}.')
--> 408 @dataclass
    409 class BuildConfig:
    410     max_input_len: int = 256
    411     max_output_len: int = 256

File ~/anaconda3/lib/python3.11/dataclasses.py:1230, in dataclass(cls, init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, slots, weakref_slot)
   1227     return wrap
   1229 # We're called as @dataclass without parens.
-> 1230 return wrap(cls)

File ~/anaconda3/lib/python3.11/dataclasses.py:1220, in dataclass.<locals>.wrap(cls)
   1219 def wrap(cls):
-> 1220     return _process_class(cls, init, repr, eq, order, unsafe_hash,
   1221                           frozen, match_args, kw_only, slots,
   1222                           weakref_slot)

File ~/anaconda3/lib/python3.11/dataclasses.py:958, in _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, match_args, kw_only, slots, weakref_slot)
    955         kw_only = True
    956     else:
    957         # Otherwise it's a field of some type.
--> 958         cls_fields.append(_get_field(cls, name, type, kw_only))
    960 for f in cls_fields:
    961     fields[f.name] = f

File ~/anaconda3/lib/python3.11/dataclasses.py:815, in _get_field(cls, a_name, a_type, default_kw_only)
    811 # For real fields, disallow mutable defaults.  Use unhashable as a proxy
    812 # indicator for mutability.  Read the __hash__ attribute from the class,
    813 # not the instance.
    814 if f._field_type is _FIELD and f.default.__class__.__hash__ is None:
--> 815     raise ValueError(f'mutable default {type(f.default)} for field '
    816                      f'{f.name} is not allowed: use default_factory')
    818 return f

ValueError: mutable default <class 'tensorrt_llm.lora_manager.LoraBuildConfig'> for field lora_config is not allowed: use default_factory
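
For reference, the root cause appears to be Python 3.11's stricter dataclass check: since 3.11, dataclasses reject any default value whose class is unhashable, and a plain @dataclass such as LoraBuildConfig (eq=True, not frozen) sets __hash__ = None on its instances. The sketch below reproduces the error with a stand-in class and shows the default_factory pattern the error message points to; the class and field names are illustrative, not the actual tensorrt_llm code.

# Minimal sketch, assuming Python 3.11+. LoraBuildConfig here is a stand-in
# for tensorrt_llm.lora_manager.LoraBuildConfig, not the real class.
from dataclasses import dataclass, field

@dataclass
class LoraBuildConfig:
    # An unfrozen dataclass with eq=True gets __hash__ = None, so its
    # instances count as "mutable" defaults under Python 3.11's check.
    lora_target_modules: list = field(default_factory=list)

try:
    @dataclass
    class BuildConfig:
        lora_config: LoraBuildConfig = LoraBuildConfig()  # ValueError on 3.11
except ValueError as err:
    print(err)

# The fix the error message suggests: defer construction with default_factory.
@dataclass
class FixedBuildConfig:
    lora_config: LoraBuildConfig = field(default_factory=LoraBuildConfig)
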
HuangHaoyu1997 commented 7 months ago

same issue

yblir commented 7 months ago

same issue

Qihoysa commented 6 months ago

same issue

KnightLancelot commented 6 months ago

me too

brettbj commented 6 months ago

https://github.com/NVIDIA/TensorRT-LLM/issues/1323

This upstream issue looks relevant. Going to try reverting to Python 3.10.6.
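
For context on why downgrading helps: Python 3.10's dataclass check only flagged list, dict, and set defaults as mutable, while 3.11 flags any default whose class is unhashable, which now catches instances of ordinary dataclasses like LoraBuildConfig. A rough, simplified sketch of the two checks (not the exact CPython source; the stand-in class below is illustrative):

# Simplified comparison of the mutable-default check in dataclasses._get_field
# on Python 3.10 vs 3.11; LoraBuildConfig is a stand-in, not the real class.
from dataclasses import dataclass

@dataclass
class LoraBuildConfig:
    rank: int = 0

default = LoraBuildConfig()

# Python 3.10 only rejected these concrete container types as defaults:
print(isinstance(default, (list, dict, set)))   # False -> accepted on 3.10

# Python 3.11 rejects any default whose class is unhashable; an unfrozen
# dataclass with eq=True sets __hash__ = None, so this is True:
print(default.__class__.__hash__ is None)       # True -> ValueError on 3.11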