Closed Banana-Basilisk closed 3 weeks ago
请问有解决办法嘛?我最近也有碰到这个
已解决了,修改三处代码,哪里报错就加一个reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/transformer/utils/nets_utils.py 197行附近 lengths = lengths.reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/contextual_paraformer/decoder.py 283行附近 ys_in_lens = ys_in_lens.reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/paraformer/cif_predictor.py 560行附近 token_length = token_length.reshape(-1)
大佬,问一下哈,底层原因是什么呢?
已解决了,修改三处代码,哪里报错就加一个reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/transformer/utils/nets_utils.py 197行附近 lengths = lengths.reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/contextual_paraformer/decoder.py 283行附近 ys_in_lens = ys_in_lens.reshape(-1) vim /AIproject/ASRTrainer/FunASR-main/funasr/models/paraformer/cif_predictor.py 560行附近 token_length = token_length.reshape(-1)
Reference
tensor张量维度不符合后续处理要求,这些length应该是一个一维张量,但是只有一个样本的时候,他是一个int型张量,没有维度了,reshape(-1)加一个维度就可以了
我也遇到了,请问官方代码有解决这个问题吗?
报错内容: Error executing job with overrides: ['++model=iic/speech_paraformer-large-contextual_asr_nat-zh-cn-16k-common-vocab8404', '++train_data_set_list=/home/Group/jiangyf/voice_asr/FunASR/data/Specialty_stores/train.jsonl', '++valid_data_set_list=/home/Group/jiangyf/voice_asr/FunASR/data/Specialty_stores/val.jsonl', '++dataset_conf.batch_size=20000', '++dataset_conf.batch_type=token', '++dataset_conf.num_workers=4', '++train_conf.max_epoch=30', '++train_conf.log_interval=1', '++train_conf.resume=false', '++train_conf.validate_interval=2000', '++train_conf.save_checkpoint_interval=2000', '++train_conf.keep_nbest_models=20', '++train_conf.avg_nbest_model=10', '++optim_conf.lr=0.0002', '++output_dir=./outputs_Specialty_stores_v1'] Traceback (most recent call last): File "../../../funasr/bin/train.py", line 225, in
main_hydra()
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/main.py", line 94, in decorated_main
_run_hydra(
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 394, in _run_hydra
_run_app(
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 457, in _run_app
run_and_report(
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 223, in run_and_report
raise ex
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 220, in run_and_report
return func()
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/utils.py", line 458, in
lambda: hydra.run(
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 132, in run
_ = ret.return_value
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/core/utils.py", line 260, in return_value
raise self._return_value
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/hydra/core/utils.py", line 186, in run_job
ret.return_value = task_function(task_cfg)
File "../../../funasr/bin/train.py", line 48, in main_hydra
main(kwargs)
File "../../../funasr/bin/train.py", line 185, in main
trainer.train_epoch(
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/funasr/train_utils/trainer.py", line 295, in train_epoch
retval = model(batch)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 1523, in forward
else self._run_ddp_forward(inputs, kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/parallel/distributed.py", line 1359, in _run_ddp_forward
return self.module(*inputs, **kwargs)  # type: ignore[index]
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/funasr/models/contextual_paraformer/model.py", line 107, in forward
encoder_out, encoder_out_lens = self.encode(speech, speech_lengths)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/funasr/models/paraformer/model.py", line 261, in encode
encoder_out, encoder_out_lens, _ = self.encoder(speech, speech_lengths)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/funasr/models/sanm/encoder.py", line 351, in forward
masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device)
File "/root/miniconda3/envs/funasr_v2/lib/python3.8/site-packages/funasr/models/transformer/utils/nets_utils.py", line 197, in make_pad_mask
bs = int(len(lengths))
TypeError: object of type 'int' has no len()