I have set up the environment successfully, but when I run

lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256

I get the following error:
2024-09-26:16:41:09,923 INFO [__main__.py:251] Verbosity set to INFO
2024-09-26:16:41:11,912 INFO [__main__.py:335] Selected Tasks: ['arc_challenge', 'arc_easy', 'hellaswag', 'lambada_openai', 'openbookqa', 'piqa', 'winogrande']
2024-09-26:16:41:11,912 INFO [__main__.py:336] Loading selected tasks...
2024-09-26:16:41:11,912 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
2024-09-26:16:41:12,409 INFO [huggingface.py:162] Using device 'cuda'
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/selective_scan_interface.py:164: FutureWarning: torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead.
def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/selective_scan_interface.py:240: FutureWarning: torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead.
def backward(ctx, dout):
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/triton/layer_norm.py:986: FutureWarning: torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead.
def forward(
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/triton/layer_norm.py:1045: FutureWarning: torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead.
def backward(ctx, dout, *args):
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/distributed/tensor_parallel.py:26: FutureWarning: torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead.
def forward(ctx, x, weight, bias, process_group=None, sequence_parallel=True):
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/distributed/tensor_parallel.py:62: FutureWarning: torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead.
def backward(ctx, grad_output):
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/triton/ssd_combined.py:758: FutureWarning: torch.cuda.amp.custom_fwd(args...) is deprecated. Please use torch.amp.custom_fwd(args..., device_type='cuda') instead.
def forward(ctx, zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu",
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/ops/triton/ssd_combined.py:836: FutureWarning: torch.cuda.amp.custom_bwd(args...) is deprecated. Please use torch.amp.custom_bwd(args..., device_type='cuda') instead.
def backward(ctx, dout, *args):
2024-09-26:16:41:12,966 INFO [huggingface.py:414] Overrode HF model backend type, and using type 'causal'
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/mamba_ssm/utils/hf.py:18: FutureWarning: You are using torch.load with weights_only=False (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for weights_only will be flipped to True. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via torch.serialization.add_safe_globals. We recommend you start setting weights_only=True for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
return torch.load(resolved_archive_file, map_location=mapped_device)
/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1601: FutureWarning: clean_up_tokenization_spaces was not set. It will be set to True by default. This behavior will be depracted in transformers v4.45, and will be then set to False by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884
warnings.warn(
Traceback (most recent call last):
  File "/media/8T3/rh_xu/.conda/envs/mamba/bin/lm_eval", line 8, in <module>
    sys.exit(cli_evaluate())
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/__main__.py", line 342, in cli_evaluate
    results = evaluator.simple_evaluate(
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/utils.py", line 288, in _wrapper
    return fn(*args, **kwargs)
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/evaluator.py", line 192, in simple_evaluate
    task_dict = get_task_dict(tasks, task_manager)
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/tasks/__init__.py", line 420, in get_task_dict
    task_name_from_string_dict = task_manager.load_task_or_group(
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/tasks/__init__.py", line 270, in load_task_or_group
    collections.ChainMap(*map(self._load_individual_task_or_group, task_list))
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/tasks/__init__.py", line 161, in _load_individual_task_or_group
    return load_task(task_config, task=name_or_config, group=parent_name)
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/tasks/__init__.py", line 150, in load_task
    task_object = ConfigurableTask(config=config)
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/api/task.py", line 782, in __init__
    self.download(self.config.dataset_kwargs)
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/lm_eval/api/task.py", line 871, in download
    self.dataset = datasets.load_dataset(
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/datasets/load.py", line 2074, in load_dataset
    builder_instance = load_dataset_builder(
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/datasets/load.py", line 1795, in load_dataset_builder
    dataset_module = dataset_module_factory(
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/datasets/load.py", line 1671, in dataset_module_factory
    raise e1 from None
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/site-packages/datasets/load.py", line 1617, in dataset_module_factory
    can_load_config_from_parquet_export = "DEFAULT_CONFIG_NAME" not in f.read()
  File "/media/8T3/rh_xu/.conda/envs/mamba/lib/python3.10/codecs.py", line 322, in decode
    (result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb5 in position 1: invalid start byte
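(I assume the FutureWarnings at the top are unrelated to the crash; they are just torch 2.4 deprecation notices about torch.cuda.amp.custom_fwd/custom_bwd inside mamba_ssm.)

The traceback ends inside datasets itself, before any mamba code runs, so I suspect the same failure can be reproduced without lm_eval at all. A minimal isolation sketch I would try (the Hub dataset ids below are my guess at what lm-eval 0.4.2 resolves these tasks to):

import datasets

# Call datasets directly, bypassing lm_eval and mamba_ssm entirely.
# Assumption: these ids match the dataset_path values lm-eval 0.4.2 uses for
# the selected tasks; any id that reaches dataset_module_factory should do.
for name in ("hellaswag", "piqa", "openbookqa"):
    print(f"loading {name} ...")
    datasets.load_dataset(name)  # expected to raise the same UnicodeDecodeError
    print(name, "loaded fine")

If that crashes the same way, the problem lies in datasets (or its cache) rather than anything mamba-specific.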
Could you please suggest a solution? The relevant package versions are torch==2.4.1, cuda==12.1, lm-eval==0.4.2, datasets==3.0.0.
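In case it helps narrow things down, one thing I plan to try (an assumption on my part: the cached copy of the file that datasets reads at load.py line 1617 is corrupted, since byte 0xb5 is not valid UTF-8) is clearing the Hugging Face cache so everything is re-downloaded:

import shutil
from pathlib import Path

# Default Hugging Face cache locations; adjust if HF_HOME or
# HF_DATASETS_CACHE points somewhere else.
cache_root = Path.home() / ".cache" / "huggingface"
for sub in ("datasets", "modules"):
    target = cache_root / sub
    if target.exists():
        print(f"removing {target}")
        shutil.rmtree(target)

If the cache turns out not to be the culprit, it may also be worth testing an older datasets release (2.x), since lm-eval 0.4.2 predates datasets 3.0.0.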