Open C9H20sx opened 1 year ago
His code has a small bug, and strangely nobody seems to have noticed it. There is one place where empty buckets need to be filtered out, and the code there is wrong.
Which places need to be changed? Could someone advise?
Line 323 of data_utils.py: for i in range(len(buckets) - 1, 0, -1): change the 0 to -1. Otherwise, written this way, it will keep throwing errors on any dataset that has no short samples.
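For reference, a minimal sketch of the suggested change (assuming the loop at data_utils.py line 323 is the empty-bucket cleanup in `DistributedBucketSampler._create_buckets`; written here as a standalone helper so it can be run on its own):

```python
def drop_empty_buckets(buckets, boundaries):
    # Original loop: for i in range(len(buckets) - 1, 0, -1) -- it stops before
    # index 0, so an empty first bucket (a dataset with no short utterances) is
    # never removed and later triggers the ZeroDivisionError shown further down
    # in this thread.
    for i in range(len(buckets) - 1, -1, -1):  # stop at -1 so bucket 0 is checked too
        if len(buckets[i]) == 0:               # drop buckets that received no utterances
            buckets.pop(i)
            boundaries.pop(i + 1)
    return buckets, boundaries

# Example: nothing falls into the first (shortest) or the last bucket.
print(drop_empty_buckets([[], [3, 7], []], [32, 300, 400, 500]))
# -> ([[3, 7]], [32, 400])
```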
Thank you very much, that really solved the problem. As expected, a specialist question needs a specialist to answer it; searching Baidu got me nowhere...
--- Logging error ---
Traceback (most recent call last):
  File "D:\ProgramData\Anaconda3\envs\vits\lib\logging\__init__.py", line 1028, in emit
    stream.write(msg + self.terminator)
UnicodeEncodeError: 'gbk' codec can't encode character '\u0283' in position 1329: illegal multibyte sequence
Call stack:
  File "<string>", line 1, in <module>
File "D:\ProgramData\Anaconda3\envs\vits\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "D:\ProgramData\Anaconda3\envs\vits\lib\multiprocessing\spawn.py", line 118, in _main
return self._bootstrap()
File "D:\ProgramData\Anaconda3\envs\vits\lib\multiprocessing\process.py", line 297, in _bootstrap
self.run()
File "D:\ProgramData\Anaconda3\envs\vits\lib\multiprocessing\process.py", line 99, in run
self._target(*self._args, **self._kwargs)
File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\multiprocessing\spawn.py", line 69, in _wrap
fn(i, *args)
File "D:\vits-main\train.py", line 62, in run
logger.info(hps)
Message: {'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 25000, 'learning_rate': 0.0002, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 13, 'fp16_run': True, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/list.txt.cleaned', 'validation_files': 'filelists/list_val.txt.cleaned', 'text_cleaners': ['japanese_cleaners'], 'max_wav_value': 32768.0, 'sampling_rate': 22050, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': True, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'symbols': ['_', ',', '.', '!', '?', '-', 'A', 'E', 'I', 'N', 'O', 'Q', 'U', 'a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'ʃ', 'ʧ', '↓', '↑', ' '], 'model_dir': 'D:/rem'}
Arguments: ()
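(Side note: the `--- Logging error ---` block above is separate from the crash further down. The Windows console here uses the gbk codec, which cannot represent the IPA symbol 'ʃ' (U+0283) that appears in the symbols list. A minimal illustration, not code from the repository:)

```python
# The gbk codec has no mapping for U+0283 ('ʃ'), so logging it to a gbk console fails.
"ʃ".encode("gbk")  # raises UnicodeEncodeError: 'gbk' codec can't encode character '\u0283'
```

One common workaround (my assumption, not something stated in this thread) is to force UTF-8 console I/O before launching training, e.g. `set PYTHONIOENCODING=utf-8` on Windows.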
INFO:rem:{'train': {'log_interval': 200, 'eval_interval': 1000, 'seed': 1234, 'epochs': 25000, 'learning_rate': 0.0002, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 13, 'fp16_run': True, 'lr_decay': 0.999875, 'segment_size': 8192, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0}, 'data': {'training_files': 'filelists/list.txt.cleaned', 'validation_files': 'filelists/list_val.txt.cleaned', 'text_cleaners': ['japanese_cleaners'], 'max_wav_value': 32768.0, 'sampling_rate': 22050, 'filter_length': 1024, 'hop_length': 256, 'win_length': 1024, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': None, 'add_blank': True, 'n_speakers': 0, 'cleaned_text': True}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 256}, 'symbols': ['_', ',', '.', '!', '?', '-', 'A', 'E', 'I', 'N', 'O', 'Q', 'U', 'a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'ʃ', 'ʧ', '↓', '↑', ' '], 'model_dir': 'D:/rem'}
WARNING:rem:D:\vits-main is not a git repository, therefore hash value comparison will be ignored.
INFO:torch.distributed.distributed_c10d:Added key: store_based_barrier_key:1 to store for rank: 0
INFO:torch.distributed.distributed_c10d:Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 1 nodes.
D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py:557: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 4 (cpuset is not taken into account), which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.
  cpuset_checked))
Exception ignored in: <function _MultiProcessingDataLoaderIter.__del__ at 0x000001BA73DB9798>
Traceback (most recent call last):
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 1466, in __del__
    self._shutdown_workers()
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 1397, in _shutdown_workers
    if not self._shutdown:
AttributeError: '_MultiProcessingDataLoaderIter' object has no attribute '_shutdown'
Traceback (most recent call last):
  File "train.py", line 301, in <module>
    main()
  File "train.py", line 55, in main
    mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\multiprocessing\spawn.py", line 240, in spawn
    return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\multiprocessing\spawn.py", line 198, in start_processes
    while not context.join():
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\multiprocessing\spawn.py", line 160, in join
    raise ProcessRaisedException(msg, error_index, failed_process.pid)
torch.multiprocessing.spawn.ProcessRaisedException:

-- Process 0 terminated with the following error:
Traceback (most recent call last):
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\multiprocessing\spawn.py", line 69, in _wrap
    fn(i, *args)
  File "D:\vits-main\train.py", line 122, in run
    train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d], scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
  File "D:\vits-main\train.py", line 142, in train_and_evaluate
    for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths) in enumerate(train_loader):
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 435, in __iter__
    return self._get_iterator()
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 381, in _get_iterator
    return _MultiProcessingDataLoaderIter(self)
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 988, in __init__
    super(_MultiProcessingDataLoaderIter, self).__init__(loader)
  File "D:\ProgramData\Anaconda3\envs\vits\lib\site-packages\torch\utils\data\dataloader.py", line 598, in __init__
    self._sampler_iter = iter(self._index_sampler)
  File "D:\vits-main\data_utils.py", line 358, in __iter__
    ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
ZeroDivisionError: integer division or modulo by zero
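The ZeroDivisionError at the end is exactly the symptom the fix above addresses: when the dataset contains no utterances short enough for the first bucket, that bucket stays empty, so `len_bucket` is 0 when the sampler pads each bucket in `__iter__`. A condensed, self-contained illustration of the failure (illustrative values, not the repository code):

```python
ids_bucket = []                # an empty bucket that the cleanup loop never removed
len_bucket = len(ids_bucket)   # 0
rem = 13 - len_bucket          # samples still needed to pad the bucket (illustrative)
try:
    ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)]
except ZeroDivisionError as e:
    print(e)                   # "integer division or modulo by zero", as in the log above
```

Changing the stop value of the cleanup loop from 0 to -1, as suggested above, removes the empty bucket before `__iter__` ever reaches this line.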