yl4579 / StyleTTS

Official Implementation of StyleTTS
MIT License
396 stars 64 forks source link

running train_first.py raises error #37

Closed ghost closed 1 year ago

ghost commented 1 year ago

(demo) C:\Users\Administrator\StyleTTS>python train_first.py --config_path ./Configs/config.yml {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} bert loaded bert_encoder loaded predictor loaded decoder loaded pitch_extractor loaded text_encoder loaded style_encoder loaded text_aligner loaded discriminator loaded ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ C:\Users\Administrator\StyleTTS\train_first.py:393 in │ │ │ │ 390 │ torch.save(state, save_path) │ │ 391 │ │ 392 if name=="main": │ │ ❱ 393 │ main() │ │ 394 │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1157 in call │ │ │ │ 1154 │ │ │ 1155 │ def call(self, *args: t.Any, kwargs: t.Any) -> t.Any: │ │ 1156 │ │ """Alias for :meth:main.""" │ │ ❱ 1157 │ │ return self.main(*args, kwargs) │ │ 1158 │ │ 1159 │ │ 1160 class Command(BaseCommand): │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1078 in main │ │ │ │ 1075 │ │ try: │ │ 1076 │ │ │ try: │ │ 1077 │ │ │ │ with self.make_context(prog_name, args, extra) as ctx: │ │ ❱ 1078 │ │ │ │ │ rv = self.invoke(ctx) │ │ 1079 │ │ │ │ │ if not standalone_mode: │ │ 1080 │ │ │ │ │ │ return rv │ │ 1081 │ │ │ │ │ # it's not safe to ctx.exit(rv) here! 
│ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1434 in invoke │ │ │ │ 1431 │ │ │ echo(style(message, fg="red"), err=True) │ │ 1432 │ │ │ │ 1433 │ │ if self.callback is not None: │ │ ❱ 1434 │ │ │ return ctx.invoke(self.callback, *ctx.params) │ │ 1435 │ │ │ 1436 │ def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: │ │ 1437 │ │ """Return a list of completions for the incomplete value. Looks │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:783 in invoke │ │ │ │ 780 │ │ │ │ 781 │ │ with augment_usage_errors(self): │ │ 782 │ │ │ with ctx: │ │ ❱ 783 │ │ │ │ return callback(args, kwargs) │ │ 784 │ │ │ 785 │ def forward( │ │ 786 │ │ self, cmd: "Command", *args: t.Any, *kwargs: t.Any # noqa: B902 │ │ │ │ C:\Users\Administrator\StyleTTS\trainfirst.py:140 in main │ │ │ │ 137 │ │ │ │ 138 │ │ = [model[key].train() for key in model] │ │ 139 │ │ │ │ ❱ 140 │ │ for i, batch in enumerate(train_dataloader): │ │ 141 │ │ │ │ │ 142 │ │ │ batch = [b.to(device) for b in batch] │ │ 143 │ │ │ texts, input_lengths, mels, mel_input_length = batch │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\torch\utils\data\dataloader.py:633 │ │ in next │ │ │ │ 630 │ │ │ if self._sampler_iter is None: │ │ 631 │ │ │ │ # TODO(https://github.com/pytorch/pytorch/issues/76750) │ │ 632 │ │ │ │ self._reset() # type: ignore[call-arg] │ │ ❱ 633 │ │ │ data = self._next_data() │ │ 634 │ │ │ self._num_yielded += 1 │ │ 635 │ │ │ if self._dataset_kind == _DatasetKind.Iterable and \ │ │ 636 │ │ │ │ │ self._IterableDataset_len_called is not None and \ │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\torch\utils\data\dataloader.py:134 │ │ 5 in _next_data │ │ │ │ 1342 │ │ │ │ self._task_info[idx] += (data,) │ │ 1343 │ │ │ else: │ │ 1344 │ │ │ │ del self._task_info[idx] │ │ ❱ 1345 │ │ │ │ return self._process_data(data) │ │ 1346 │ │ │ 1347 │ def _try_put_index(self): │ │ 1348 │ │ 
assert self._tasks_outstanding < self._prefetch_factor self._num_workers │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\torch\utils\data\dataloader.py:137 │ │ 1 in _process_data │ │ │ │ 1368 │ │ self._rcvd_idx += 1 │ │ 1369 │ │ self._try_put_index() │ │ 1370 │ │ if isinstance(data, ExceptionWrapper): │ │ ❱ 1371 │ │ │ data.reraise() │ │ 1372 │ │ return data │ │ 1373 │ │ │ 1374 │ def _mark_worker_as_unavailable(self, worker_id, shutdown=False): │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\torch_utils.py:644 in reraise │ │ │ │ 641 │ │ │ # If the exception takes multiple arguments, don't try to │ │ 642 │ │ │ # instantiate since we don't know how to │ │ 643 │ │ │ raise RuntimeError(msg) from None │ │ ❱ 644 │ │ raise exception │ │ 645 │ │ 646 │ │ 647 def _get_available_device_type(): │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ LibsndfileError: <exception str() failed>

ghost commented 1 year ago

Also, I am assuming one has to set `load_only_params: true` while loading a pretrained model, because if not, it raises another error:

python train_first.py --config_path ./Configs/config.yml {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} {'max_lr': 0.0001, 'pct_start': 0.0, 'epochs': 200, 'steps_per_epoch': 3} bert loaded bert_encoder loaded predictor loaded decoder loaded pitch_extractor loaded text_encoder loaded style_encoder loaded text_aligner loaded discriminator loaded ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ C:\Users\Administrator\StyleTTS\train_first.py:393 in │ │ │ │ 390 │ torch.save(state, save_path) │ │ 391 │ │ 392 if name=="main": │ │ ❱ 393 │ main() │ │ 394 │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1157 in call │ │ │ │ 1154 │ │ │ 1155 │ def call(self, *args: t.Any, kwargs: t.Any) -> t.Any: │ │ 1156 │ │ """Alias for :meth:main.""" │ │ ❱ 1157 │ │ return self.main(*args, kwargs) │ │ 1158 │ │ 1159 │ │ 1160 class Command(BaseCommand): │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1078 in main │ │ │ │ 1075 │ │ try: │ │ 1076 │ │ │ try: │ │ 1077 │ │ │ │ with self.make_context(prog_name, args, extra) as ctx: │ │ ❱ 1078 │ │ │ │ │ rv = self.invoke(ctx) │ │ 1079 │ │ │ │ │ if not standalone_mode: │ │ 1080 │ │ │ │ │ │ return rv │ │ 1081 │ │ │ │ │ # it's not safe to ctx.exit(rv) here! 
│ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:1434 in invoke │ │ │ │ 1431 │ │ │ echo(style(message, fg="red"), err=True) │ │ 1432 │ │ │ │ 1433 │ │ if self.callback is not None: │ │ ❱ 1434 │ │ │ return ctx.invoke(self.callback, *ctx.params) │ │ 1435 │ │ │ 1436 │ def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]: │ │ 1437 │ │ """Return a list of completions for the incomplete value. Looks │ │ │ │ C:\Users\Administrator\miniconda3\envs\demo\lib\site-packages\click\core.py:783 in invoke │ │ │ │ 780 │ │ │ │ 781 │ │ with augment_usage_errors(self): │ │ 782 │ │ │ with ctx: │ │ ❱ 783 │ │ │ │ return callback(args, kwargs) │ │ 784 │ │ │ 785 │ def forward( │ │ 786 │ │ self, cmd: "Command", *args: t.Any, **kwargs: t.Any # noqa: B902 │ │ │ │ C:\Users\Administrator\StyleTTS\train_first.py:119 in main │ │ │ │ 116 │ │ │ model[key] = MyDataParallel(model[key]) │ │ 117 │ │ │ 118 │ if config.get('pretrained_model', '') != '': │ │ ❱ 119 │ │ model, optimizer, start_epoch, iters = load_checkpoint(model, optimizer, config │ │ 120 │ │ │ │ │ │ │ │ │ load_only_params=config.get('load_only_params', True │ │ 121 │ else: │ │ 122 │ │ start_epoch = 0 │ │ │ │ C:\Users\Administrator\StyleTTS\models.py:735 in loadcheckpoint │ │ │ │ 732 │ = [model[key].eval() for key in model] │ │ 733 │ │ │ 734 │ if not load_only_params: │ │ ❱ 735 │ │ epoch = state["epoch"] │ │ 736 │ │ iters = state["iters"] │ │ 737 │ │ optimizer.load_state_dict(state["optimizer"]) │ │ 738 │ else: │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ KeyError: 'epoch'

Artyom17 commented 1 year ago

Most likely it can't find a file to train on. If you are using the LJSpeech dataset, copy the LJSpeech-1.1 directory into the root of the repository (i.e. it should NOT be in styletts/Data/LJSpeech-1.1, but just in styletts/LJSpeech-1.1).

ghost commented 1 year ago

Yeah, I found that in the code later and posted the same fix in another issue.