MasayaKawamura / MB-iSTFT-VITS

Lightweight and High-Fidelity End-to-End Text-to-Speech with Multi-Band Generation and Inverse Short-Time Fourier Transform
Apache License 2.0

How to run on CPU? #16

Closed · kdrkdrkdr closed this issue 1 year ago

kdrkdrkdr commented 1 year ago

    RuntimeError                              Traceback (most recent call last)
    <ipython-input> in <module>
          5     x_tst = stn_tst.unsqueeze(0)
          6     x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
    ----> 7     audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1/speed)[0][0,0].data.cpu().float().numpy()
          8     ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))

    3 frames
    /content/drive/MyDrive/MB-iSTFT-VITS-multilingual/pqmf.py in __init__(self, device, subbands, taps, cutoff_ratio, beta)
         76
         77     # convert to tensor
    ---> 78     analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1).cuda(device)
         79     synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0).cuda(device)
         80

    RuntimeError: Invalid device, must be cuda device

I ran this code in Colab, but got the error above:

    %cd /content/drive/MyDrive/MB-iSTFT-VITS-multilingual

    import matplotlib.pyplot as plt
    import IPython.display as ipd

    import os
    import json
    import math
    import torch
    from torch import nn
    from torch.nn import functional as F
    from torch.utils.data import DataLoader

    import commons
    import utils
    from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
    from models import SynthesizerTrn
    from text.symbols import symbols
    from text import text_to_sequence

    from scipy.io.wavfile import write

    def get_text(text, hps):
        text_norm = text_to_sequence(text, hps.data.text_cleaners)
        if hps.data.add_blank:
            text_norm = commons.intersperse(text_norm, 0)
        text_norm = torch.LongTensor(text_norm)
        return text_norm

    hps = utils.get_hparams_from_file("./configs/arona.json")

    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        **hps.model)
    _ = net_g.eval()

    _ = utils.load_checkpoint("./logs/arona/G_8000.pth", net_g, None)

    text = 'こんにちは'
    speed = 1
    stn_tst = get_text(text, hps)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
        audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1/speed)[0][0,0].data.cpu().float().numpy()
    ipd.display(ipd.Audio(audio, rate=hps.data.sampling_rate, normalize=False))
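(For context: the traceback above ends in pqmf.py, whose constructor calls .cuda(device) on the filter tensors unconditionally, so PQMF fails whenever the requested device is not a CUDA device, e.g. on a CPU-only Colab runtime. The fixes further down in this thread replace these calls with .to(device).)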

kdrkdrkdr commented 1 year ago

And here is my config.json:

{ "train": { "log_interval": 200, "eval_interval": 2000, "seed": 1234, "epochs": 10000, "learning_rate": 2e-4, "betas": [0.8, 0.99], "eps": 1e-9, "batch_size": 32, "fp16_run": false, "lr_decay": 0.999875, "segment_size": 8192, "init_lr_ratio": 1, "warmup_epochs": 0, "c_mel": 45, "c_kl": 1.0, "fft_sizes": [384, 683, 171], "hop_sizes": [30, 60, 10], "win_lengths": [150, 300, 60], "window": "hann_window"
}, "data": { "training_files":"../tts_dataset/arona/train.txt.cleaned", "validation_files":"../tts_dataset/arona/val.txt.cleaned", "text_cleaners":["japanese_cleaners2"], "max_wav_value": 32768.0, "sampling_rate": 22050, "filter_length": 1024, "hop_length": 256, "win_length": 1024, "n_mel_channels": 80, "mel_fmin": 0.0, "mel_fmax": null, "add_blank": true, "n_speakers": 0, "cleaned_text": true }, "model": { "ms_istft_vits": false, "mb_istft_vits": true, "istft_vits": false, "subbands": 4, "gen_istft_n_fft": 16, "gen_istft_hop_size": 4, "inter_channels": 192, "hidden_channels": 192, "filter_channels": 768, "n_heads": 2, "n_layers": 6, "kernel_size": 3, "p_dropout": 0.1, "resblock": "1", "resblock_kernel_sizes": [3,7,11], "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], "upsample_rates": [4,4], "upsample_initial_channel": 512, "upsample_kernel_sizes": [16,16], "n_layers_q": 3, "use_spectral_norm": false, "use_sdp": false } }

Aliraheem commented 1 year ago

Have you solved it?

leminhnguyen commented 1 year ago

@kdrkdrkdr change .cuda(device) to .to(device)
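For reference, this is general PyTorch behavior rather than anything specific to this repo: Tensor.cuda() only accepts CUDA devices, while Tensor.to() accepts any device, which is why the substitution makes the code work on CPU-only machines. A minimal sketch (the exact message may vary across PyTorch builds):

    import torch

    t = torch.zeros(3)
    t = t.to("cpu")        # .to() accepts any device, including CPU

    try:
        t = t.cuda("cpu")  # .cuda() requires a CUDA device
    except RuntimeError as e:
        print(e)           # e.g. "Invalid device, must be cuda device"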

kdrkdrkdr commented 1 year ago

@Aliraheem It's solved. Two changes were needed:

1. In models.py, change

       y_mb_hat = F.conv_transpose1d(y_mb_hat, self.updown_filter.cuda(x.device) * self.subbands, stride=self.subbands)

   to

       y_mb_hat = F.conv_transpose1d(y_mb_hat, self.updown_filter.to(x.device) * self.subbands, stride=self.subbands)

2. In pqmf.py, replace the .cuda(device) calls with .to(device):

       # convert to tensor
       analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1).to(device)    # was .cuda(device)
       synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0).to(device)  # was .cuda(device)

       # register coefficients as buffer
       self.register_buffer("analysis_filter", analysis_filter)
       self.register_buffer("synthesis_filter", synthesis_filter)

       # filter for downsampling & upsampling
       updown_filter = torch.zeros((subbands, subbands, subbands)).float().to(device)    # was .cuda(device)
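A related simplification, not strictly required once the .to(device) fix is in: because the filters are registered with register_buffer, they travel with the module whenever .to(device) or .cuda() is called on it, so the tensors can be created on CPU and moved later. A minimal sketch of that pattern (FilterBank and the shapes below are illustrative stand-ins, not the repo's actual PQMF code):

    import torch
    import torch.nn as nn

    class FilterBank(nn.Module):  # hypothetical module, for illustration only
        def __init__(self, subbands=4, taps=62):
            super().__init__()
            # Buffers registered here become part of the module's state,
            # so module.to(device) moves them automatically; no explicit
            # .cuda()/.to(device) is needed at construction time.
            self.register_buffer("analysis_filter", torch.randn(subbands, 1, taps + 1))
            self.register_buffer("updown_filter", torch.zeros(subbands, subbands, subbands))

    fb = FilterBank()
    fb.to("cpu")  # works on CPU-only machines
    if torch.cuda.is_available():
        fb.to("cuda")  # buffers follow the module to the GPU
    print(fb.analysis_filter.device)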