I may not have worded the title precisely (forgive me for that), but what I mean is that running any script to start testing or training gives me the same error.
Here is the error I get when trying to run test.sh from ru_ipa_fastpitch_hifigan (note: the shell messages below are in Russian — "команда не найдена" means "command not found", and the "fatal" lines say the clone target paths already exist and are not empty directories):
./test.sh
./test.sh: строка 2: conda: команда не найдена
fatal: целевой путь «ru_g2p_ipa_bert_large» уже существует и не является пустым каталогом.
fatal: целевой путь «tts_ru_ipa_fastpitch_ruslan» уже существует и не является пустым каталогом.
fatal: целевой путь «tts_ru_hifigan_ruslan» уже существует и не является пустым каталогом.
Traceback (most recent call last):
File "NeMo/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py", line 42, in <module>
from helpers import ITN_MODEL, instantiate_model_and_trainer
File "/home/egor/synthesys/NeMo/examples/nlp/text_normalization_as_tagging/helpers.py", line 22, in <module>
from nemo.collections.nlp.models import ThutmoseTaggerModel
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/nlp/__init__.py", line 15, in <module>
from nemo.collections.nlp import data, losses, models, modules
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/nlp/data/__init__.py", line 17, in <module>
from nemo.collections.nlp.data.information_retrieval.information_retrieval_dataset import (
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/nlp/data/information_retrieval/__init__.py", line 15, in <module>
from nemo.collections.nlp.data.information_retrieval.information_retrieval_dataset import (
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/nlp/data/information_retrieval/information_retrieval_dataset.py", line 24, in <module>
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/__init__.py", line 16, in <module>
from nemo.collections.common import data, losses, parts, tokenizers
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/__init__.py", line 17, in <module>
from nemo.collections.common.tokenizers.canary_tokenizer import CanaryTokenizer
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/canary_tokenizer.py", line 51, in <module>
class CanaryTokenizer(AggregateTokenizer):
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/canary_tokenizer.py", line 105, in CanaryTokenizer
def build_special_tokenizer(output_dir: str | Path) -> SentencePieceTokenizer:
TypeError: unsupported operand type(s) for |: 'type' and 'type'
Traceback (most recent call last):
File "nemo_compatible/scripts/tts/ru_g2p_ipa/preprocess_text_before_tts.py", line 30, in <module>
with open(args.g2p_name, "r", encoding="utf-8") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'test_input.txt.words.g2p'
Traceback (most recent call last):
File "nemo_compatible/scripts/tts/tts_infer.py", line 6, in <module>
from nemo.collections.tts.models.base import SpectrogramGenerator, Vocoder
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/tts/__init__.py", line 15, in <module>
from nemo.collections.tts import data, losses, models, modules
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/tts/models/__init__.py", line 16, in <module>
from nemo.collections.tts.models.audio_codec import AudioCodecModel
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/tts/models/audio_codec.py", line 27, in <module>
from nemo.collections.tts.losses.audio_codec_loss import (
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/tts/losses/audio_codec_loss.py", line 21, in <module>
from nemo.collections.asr.parts.preprocessing.features import FilterbankFeatures
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/__init__.py", line 15, in <module>
from nemo.collections.asr import data, losses, models, modules
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/losses/__init__.py", line 16, in <module>
from nemo.collections.asr.losses.audio_losses import SDRLoss
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/losses/audio_losses.py", line 21, in <module>
from nemo.collections.asr.parts.preprocessing.features import make_seq_mask_like
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/parts/preprocessing/__init__.py", line 16, in <module>
from nemo.collections.asr.parts.preprocessing.features import FeaturizerFactory, FilterbankFeatures, WaveformFeaturizer
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/parts/preprocessing/features.py", line 44, in <module>
from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/asr/parts/preprocessing/perturb.py", line 50, in <module>
from nemo.collections.common.parts.preprocessing import collections, parsers
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/__init__.py", line 16, in <module>
from nemo.collections.common import data, losses, parts, tokenizers
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/__init__.py", line 17, in <module>
from nemo.collections.common.tokenizers.canary_tokenizer import CanaryTokenizer
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/canary_tokenizer.py", line 51, in <module>
class CanaryTokenizer(AggregateTokenizer):
File "/home/egor/.local/lib/python3.8/site-packages/nemo/collections/common/tokenizers/canary_tokenizer.py", line 105, in CanaryTokenizer
def build_special_tokenizer(output_dir: str | Path) -> SentencePieceTokenizer:
TypeError: unsupported operand type(s) for |: 'type' and 'type'
(Repeating for clarity:) I may not have worded the title precisely, but any script to start testing or training produces the same error, as shown above for test.sh from ru_ipa_fastpitch_hifigan.
My system: Ubuntu 22.04.4, Python 3.8. Pip list: