[Open] hjyya opened this issue 1 year ago
Code:

    trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n")
    UnicodeEncodeError: 'ascii' codec can't encode characters in position 210-213: ordinal not in range(128)

Full error output:

    Training: 0it [00:00, ?it/s]
    Training: 0%| | 0/200 [00:00<?, ?it/s]
    Epoch 0: 0%| | 0/200 [00:00<?, ?it/s]
    {'zero_allow_untested_optimizer': True, 'zero_optimization': {'stage': 2, 'contiguous_gradients': True, 'overlap_comm': True, 'allgather_partitions': True, 'reduce_scatter': True, 'allgather_bucket_size': 200000000, 'reduce_bucket_size': 200000000, 'sub_group_size': 1000000000000}, 'activation_checkpointing': {'partition_activations': False, 'cpu_checkpointing': False, 'contiguous_memory_optimization': False, 'synchronize_checkpoint_boundary': False}, 'aio': {'block_size': 1048576, 'queue_depth': 8, 'single_submit': False, 'overlap_events': True, 'thread_count': 1}, 'gradient_accumulation_steps': 8, 'train_micro_batch_size_per_gpu': 1, 'gradient_clipping': 1.0, 'bf16': {'enabled': True}}
    Epoch 0: 0%| | 1/200 [00:02<09:07, 2.75s/it]
    Epoch 0: 0%| | 1/200 [00:02<09:08, 2.75s/it, loss=2.530, lr=5e-5]
    Traceback (most recent call last):
      File "/mnt/f/RWKV/./finetune/lora/train.py", line 479, in <module>
        trainer.fit(model, data_loader)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 608, in fit
        call._call_and_handle_interrupt(
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/call.py", line 36, in _call_and_handle_interrupt
        return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/strategies/launchers/subprocess_script.py", line 88, in launch
        return function(*args, **kwargs)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 650, in _fit_impl
        self._run(model, ckpt_path=self.ckpt_path)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 1112, in _run
        results = self._run_stage()
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 1191, in _run_stage
        self._run_train()
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 1214, in _run_train
        self.fit_loop.run()
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/loops/loop.py", line 199, in run
        self.advance(*args, **kwargs)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/loops/fit_loop.py", line 267, in advance
        self._outputs = self.epoch_loop.run(self._data_fetcher)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/loops/loop.py", line 199, in run
        self.advance(*args, **kwargs)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/loops/epoch/training_epoch_loop.py", line 203, in advance
        self.trainer._call_callback_hooks("on_train_batch_start", batch, batch_idx)
      File "/usr/local/lib/python3.10/dist-packages/pytorch_lightning/trainer/trainer.py", line 1394, in _call_callback_hooks
        fn(self, self.lightning_module, *args, **kwargs)
      File "/mnt/f/RWKV/finetune/lora/src/trainer.py", line 63, in on_train_batch_start
        trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n")
    UnicodeEncodeError: 'ascii' codec can't encode characters in position 210-213: ordinal not in range(128)
    Epoch 0: 0%| | 1/200 [00:03<10:08, 3.06s/it, loss=2.530, lr=5e-5]

WSL:
This looks like a text-encoding problem with the training data. Try training on the default sample data and see whether that works.
Same problem here ['ascii' codec can't encode characters in position 210-213: ordinal not in range(128)]. The sample data trains fine, but training on arbitrary novel text seems to hit this error every time.
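Note that the traceback points at the run-log write in finetune/lora/src/trainer.py, not at the data loader: vars(self.args) is dumped into the log and includes every launch argument, so if, say, the --data_file path contains Chinese characters while the log file was opened with the locale's default encoding, the write raises UnicodeEncodeError even though the tokenized data itself is fine. That would explain why the ASCII-only sample data trains but a novel with a Chinese file name does not. Below is a minimal sketch of the fix, assuming the log is created with a bare open() call; the path and log contents here are illustrative, not the repo's exact code:

    import os

    proj_dir = "out"  # illustrative stand-in for args.proj_dir
    os.makedirs(proj_dir, exist_ok=True)

    # A bare open() uses locale.getpreferredencoding(), which is ASCII
    # under an unconfigured WSL locale, so non-ASCII args crash the write:
    #     my_log = open(os.path.join(proj_dir, "train_log.txt"), "a")
    # Passing the encoding explicitly makes the write locale-independent:
    my_log = open(os.path.join(proj_dir, "train_log.txt"), "a", encoding="utf-8")
    my_log.write("NEW RUN 2023-01-01\n{'data_file': '/mnt/f/数据/小说.txt'}\n")  # illustrative non-ASCII payload
    my_log.close()

Pinning encoding="utf-8" at the open() call decouples the log from the shell locale, which is the usual root cause under WSL.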
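More on the trailing "WSL:" note above, which is probably the key: when open() is called without encoding=, Python falls back to locale.getpreferredencoding(), and a stock WSL distribution often runs with a C/POSIX locale, which makes that fallback ASCII. A quick check, standard library only:

    import locale, sys

    # What open() defaults to when no encoding= is passed:
    print(locale.getpreferredencoding(False))  # 'ANSI_X3.4-1968' (ASCII) under a C/POSIX locale
    # 1 if the interpreter runs in UTF-8 mode (-X utf8 or PYTHONUTF8=1):
    print(sys.flags.utf8_mode)

If this prints an ASCII codec, either export PYTHONUTF8=1 before launching training (PEP 540 UTF-8 mode, which switches open()'s default to UTF-8) or set a UTF-8 locale such as LANG=C.UTF-8. Both avoid patching the code, though the explicit encoding="utf-8" sketched earlier is the durable fix.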