|
|
import torch |
|
|
import nltk |
|
|
nltk.download('punkt', download_dir='./')
nltk.download('punkt_tab', download_dir='./')
nltk.data.path.append('.')  # let nltk find the tokenizers downloaded above
|
|
import librosa |
|
|
import audiofile |
|
|
import torch.nn.functional as F |
|
|
import math |
|
|
import numpy as np |
|
|
import torch.nn as nn |
|
|
import string |
|
|
import textwrap |
|
|
import phonemizer |
|
|
from espeak_util import set_espeak_library |
|
|
from transformers import AlbertConfig, AlbertModel |
|
|
from huggingface_hub import hf_hub_download |
|
|
from nltk.tokenize import word_tokenize |
|
|
from torch.nn import Conv1d, ConvTranspose1d |
|
|
from torch.nn.utils.parametrizations import weight_norm |
|
|
from torch.nn.utils import spectral_norm |
|
|
|
|
|
_pad = "$" |
|
|
_punctuation = ';:,.!?¡¿—…"«»“” ' |
|
|
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' |
|
|
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" |
|
|
MAX_PHONEMES = 424 |
|
|
|
|
|
symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa) |
|
|
|
|
|
# Symbol -> id lookup; its size must stay in sync with the n_symbols /
# vocab_size of 178 used by the models below.
dicts = {symbol: i for i, symbol in enumerate(symbols)}
|
|
|
|
|
|
|
|
class TextCleaner:

    def __init__(self, dummy=None):
        self.word_index_dictionary = dicts

    def __call__(self, text):
        # Map each character/phoneme to its id; symbols missing from the
        # table are silently skipped.
        indexes = []
        for char in text:
            try:
                indexes.append(self.word_index_dictionary[char])
            except KeyError:
                pass
        return indexes
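
# Usage sketch (ids follow the symbol table above; unknown chars are dropped):
#   TextCleaner()('a.')  ->  [43, 4]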
|
|
|
|
|
set_espeak_library() |
|
|
|
|
|
textcleaner = TextCleaner()
|
|
|
|
|
global_phonemizer = phonemizer.backend.EspeakBackend(language="en-us", preserve_punctuation=True, with_stress=True) |
|
|
|
|
|
def _del_prefix(d):
    # Strip the 7-char 'module.' prefix left by nn.DataParallel checkpoints,
    # e.g. {'module.fc.weight': w} -> {'fc.weight': w}.
    out = {}
    for k, v in d.items():
        out[k[7:]] = v
    return out
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class StyleTTS2(nn.Module): |
|
|
|
|
|
def __init__(self): |
|
|
super().__init__() |
|
|
albert_base_configuration = AlbertConfig(vocab_size=178, |
|
|
hidden_size=768, |
|
|
num_attention_heads=12, |
|
|
intermediate_size=2048, |
|
|
max_position_embeddings=512, |
|
|
num_hidden_layers=12, |
|
|
dropout=0.1) |
|
|
self.bert = AlbertModel(albert_base_configuration) |
|
|
state_dict = torch.load(hf_hub_download(repo_id='dkounadis/artificial-styletts2', |
|
|
filename='Utils/PLBERT/step_1000000.pth'), |
|
|
map_location='cpu')['net'] |
|
|
        new_state_dict = {}
        for k, v in state_dict.items():
            name = k[7:]  # drop the 'module.' DataParallel prefix
            if name.startswith('encoder.'):
                name = name[8:]  # drop the PL-BERT 'encoder.' wrapper
            new_state_dict[name] = v
        # This buffer was removed from AlbertModel in newer transformers.
        del new_state_dict["embeddings.position_ids"]
        self.bert.load_state_dict(new_state_dict, strict=True)
|
|
self.decoder = Decoder(dim_in=512, |
|
|
style_dim=128, |
|
|
dim_out=80, |
|
|
resblock_kernel_sizes=[3, 7, 11], |
|
|
upsample_rates=[10, 5, 3, 2], |
|
|
upsample_initial_channel=512, |
|
|
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], |
|
|
upsample_kernel_sizes=[20, 10, 6, 4]) |
|
|
self.text_encoder = TextEncoder(channels=512, |
|
|
kernel_size=5, |
|
|
depth=3, |
|
|
n_symbols=178, |
|
|
) |
|
|
self.predictor = ProsodyPredictor(style_dim=128, |
|
|
d_hid=512, |
|
|
nlayers=3, |
|
|
max_dur=50) |
|
|
self.style_encoder = StyleEncoder() |
|
|
self.predictor_encoder = StyleEncoder() |
|
|
self.bert_encoder = torch.nn.Linear(self.bert.config.hidden_size, 512) |
|
|
self.mel_spec = MelSpec() |
|
|
params = torch.load(hf_hub_download(repo_id='yl4579/StyleTTS2-LibriTTS', |
|
|
filename='Models/LibriTTS/epochs_2nd_00020.pth'), |
|
|
map_location='cpu')['net'] |
|
|
self.bert.load_state_dict(_del_prefix(params['bert']), strict=True) |
|
|
self.bert_encoder.load_state_dict(_del_prefix(params['bert_encoder']), strict=True) |
|
|
self.predictor.load_state_dict(_del_prefix(params['predictor']), strict=True) |
|
|
self.decoder.load_state_dict(_del_prefix(params['decoder']), strict=True) |
|
|
self.text_encoder.load_state_dict(_del_prefix(params['text_encoder']), strict=True) |
|
|
self.predictor_encoder.load_state_dict(_del_prefix(params['predictor_encoder']), strict=True) |
|
|
self.style_encoder.load_state_dict(_del_prefix(params['style_encoder']), strict=True) |
|
|
|
|
|
|
|
|
        # Inference-only model: freeze all weights and switch to eval mode.
        for p in self.parameters():
            p.requires_grad = False
        self.eval()
|
|
|
|
|
|
|
|
    def device(self):
        # All weights live on one device; use a parameter as a proxy.
        return self.style_encoder.unshared.weight.device
|
|
|
|
|
    def compute_style(self, wav_file=None):

        x, sr = librosa.load(wav_file, sr=24000)
        x, _ = librosa.effects.trim(x, top_db=30)
        if sr != 24000:
            # Defensive only: librosa.load(sr=24000) has already resampled.
            x = librosa.resample(x, orig_sr=sr, target_sr=24000)
|
|
|
|
|
        x = torch.from_numpy(x[None, :]).to(device=self.device(),
                                            dtype=torch.float)
        # Log-mel, normalised with StyleTTS2's (mean=-4, std=4) statistics.
        mel_tensor = (torch.log(1e-5 + self.mel_spec(x)) + 4) / 4
|
|
|
|
|
        ref_s = self.style_encoder(mel_tensor)      # acoustic style   [1, 1, 1, 128]
        ref_p = self.predictor_encoder(mel_tensor)  # prosodic style   [1, 1, 1, 128]
        s = torch.cat([ref_s, ref_p], dim=3)
        s = s[:, :, 0, :].transpose(1, 2)  # -> [1, 256, 1]
        return s
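
    # compute_style('voice.wav') -> style tensor of shape [1, 256, 1]:
    # rows 0:128 condition the decoder, rows 128:256 the prosody predictor
    # ('voice.wav' is an illustrative file name).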
|
|
|
|
|
    def inference(self,
                  text,
                  ref_s=None):
        '''Synthesize speech for `text`; strings are chunked because the
        phonemized sequence can exceed MAX_PHONEMES.'''
|
|
|
|
|
        if isinstance(ref_s, str):
            # A path was given: compute the style vector from the wav.
            ref_s = self.compute_style(ref_s)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
        if isinstance(text, str):
            # Chunk long strings (<= 74 chars) so the phonemized sequence
            # stays under MAX_PHONEMES; strip punctuation and close each
            # chunk with '.'.
            _translator = str.maketrans('', '', string.punctuation)
            text = [sub_sent.translate(_translator) + '.'
                    for sub_sent in textwrap.wrap(text, 74)]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
device = ref_s.device |
|
|
total = [] |
|
|
        for _t in text:

            # Grapheme-to-phoneme via espeak, then re-join the tokenised
            # output so spacing is normalised.
            _t = global_phonemizer.phonemize([_t])
            _t = word_tokenize(_t[0])
            _t = ' '.join(_t)
|
|
|
|
|
            tokens = textcleaner(_t)[:MAX_PHONEMES] + [4]  # truncate; close with '.' (id 4)
            tokens.insert(0, 0)  # prepend the pad symbol '$' (id 0)
|
|
tokens = torch.LongTensor(tokens).to(device).unsqueeze(0) |
|
|
with torch.no_grad(): |
|
|
hidden_states = self.text_encoder(tokens) |
|
|
bert_dur = self.bert(tokens, attention_mask=torch.ones_like(tokens) |
|
|
).last_hidden_state |
|
|
d_en = self.bert_encoder(bert_dur).transpose(-1, -2) |
|
|
aln_trg, F0_pred, N_pred = self.predictor(d_en=d_en, s=ref_s[:, 128:, :]) |
|
|
asr = torch.bmm(aln_trg, hidden_states) |
|
|
asr = asr.transpose(1, 2) |
|
|
asr_new = torch.zeros_like(asr) |
|
|
asr_new[:, :, 0] = asr[:, :, 0] |
|
|
asr_new[:, :, 1:] = asr[:, :, 0:-1] |
|
|
asr = asr_new |
|
|
x = self.decoder(asr=asr, |
|
|
F0_curve=F0_pred, |
|
|
N=N_pred, |
|
|
s=ref_s[:, :128, :]) |
|
|
|
|
|
                if x.shape[2] < 100:
                    # Degenerate chunk: substitute a short stretch of silence.
                    x = torch.zeros(1, 1, 1000, device=self.device())

                # Trim the noisy onset and the long synthetic tail.
                x = x[..., 40:-4000]

                if x.shape[2] == 0:
                    # Trimming consumed everything: fall back to silence.
                    x = torch.zeros(1, 1, 1000, device=self.device())
|
|
total.append(x) |
|
|
|
|
|
|
|
|
        total = torch.cat(total, 2)  # join all chunks along the time axis

        return total
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_padding(kernel_size, dilation=1):
    # 'same' padding for a dilated odd-sized kernel
    return (kernel_size * dilation - dilation) // 2
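
# e.g. get_padding(3) == 1 and get_padding(7, dilation=3) == 9, so a
# stride-1 convolution preserves the input length.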
|
|
|
|
|
|
|
|
def _tile(x,
          length=None):
    # Repeat x along the last (time) dim until it covers `length`, then crop.
    x = x.repeat(1, 1, int(length / x.shape[2]) + 1)[:, :, :length]
    return x
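
# e.g. a [B, C, 3] tensor tiled to length=7 becomes [B, C, 7]
# (pattern t0 t1 t2 t0 t1 t2 t0).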
|
|
|
|
|
|
|
|
class AdaIN1d(nn.Module): |
|
|
|
|
|
|
|
|
|
|
|
def __init__(self, style_dim, num_features): |
|
|
super().__init__() |
|
|
self.norm = nn.InstanceNorm1d(num_features, affine=False) |
|
|
self.fc = nn.Linear(style_dim, num_features*2) |
|
|
|
|
|
    def forward(self, x, s):
        # Predict a per-channel (gamma, beta) pair from the style vector,
        # broadcast it along time, and modulate the instance-normalised input.
        s = self.fc(s.transpose(1, 2)).transpose(1, 2)
        s = _tile(s, length=x.shape[2])
        gamma, beta = torch.chunk(s, chunks=2, dim=1)
        return (1 + gamma) * self.norm(x) + beta
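
# AdaIN along time: out = (1 + gamma(s)) * InstanceNorm(x) + beta(s),
# with gamma/beta predicted per channel from the style vector s.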
|
|
|
|
|
|
|
|
class AdaINResBlock1(torch.nn.Module): |
|
|
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64): |
|
|
super(AdaINResBlock1, self).__init__() |
|
|
self.convs1 = nn.ModuleList([ |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], |
|
|
padding=get_padding(kernel_size, dilation[0]))), |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], |
|
|
padding=get_padding(kernel_size, dilation[1]))), |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], |
|
|
padding=get_padding(kernel_size, dilation[2]))) |
|
|
]) |
|
|
|
|
|
|
|
|
self.convs2 = nn.ModuleList([ |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, |
|
|
padding=get_padding(kernel_size, 1))), |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, |
|
|
padding=get_padding(kernel_size, 1))), |
|
|
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, |
|
|
padding=get_padding(kernel_size, 1))) |
|
|
]) |
|
|
|
|
|
|
|
|
self.adain1 = nn.ModuleList([ |
|
|
AdaIN1d(style_dim, channels), |
|
|
AdaIN1d(style_dim, channels), |
|
|
AdaIN1d(style_dim, channels), |
|
|
]) |
|
|
|
|
|
self.adain2 = nn.ModuleList([ |
|
|
AdaIN1d(style_dim, channels), |
|
|
AdaIN1d(style_dim, channels), |
|
|
AdaIN1d(style_dim, channels), |
|
|
]) |
|
|
|
|
|
self.alpha1 = nn.ParameterList( |
|
|
[nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))]) |
|
|
self.alpha2 = nn.ParameterList( |
|
|
[nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))]) |
|
|
|
|
|
    def forward(self, x, s):
        for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2):
            xt = n1(x, s)
            # Snake activation: x + (1/alpha) * sin^2(alpha * x)
            xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2)
            xt = c1(xt)
            xt = n2(xt, s)
            xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2)
            xt = c2(xt)
            x = xt + x
        return x
|
|
|
|
|
|
|
|
class SourceModuleHnNSF(torch.nn.Module): |
|
|
|
|
|
def __init__(self): |
|
|
|
|
|
super().__init__() |
|
|
self.harmonic_num = 8 |
|
|
self.l_linear = torch.nn.Linear(self.harmonic_num + 1, 1) |
|
|
self.upsample_scale = 300 |
|
|
|
|
|
|
|
|
    def forward(self, x):

        # Per-harmonic frequencies: F0 times [1, 2, ..., harmonic_num + 1].
        x = torch.multiply(x, torch.FloatTensor(
            [[range(1, self.harmonic_num + 2)]]).to(x.device))

        # Normalised instantaneous frequency (cycles per sample at 24 kHz).
        rad_values = x / 24000
        rad_values = rad_values % 1
        rad_values = F.interpolate(rad_values.transpose(1, 2),
                                   scale_factor=1 / self.upsample_scale,
                                   mode='linear').transpose(1, 2)

        # Integrate frequency to phase at frame rate, then upsample back.
        phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
        phase = F.interpolate(phase.transpose(1, 2) * self.upsample_scale,
                              scale_factor=self.upsample_scale, mode='linear').transpose(1, 2)
        x = .009 * phase.sin()

        # Mix the harmonics down to a single excitation channel.
        x = self.l_linear(x).tanh()
        return x
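
# Shape sketch: the incoming F0 has already been upsampled to sample rate
# ([B, T, 1]); frequencies are integrated to phase at frame rate (factor
# 300 down/up) and the harmonics are mixed into one excitation channel.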
|
|
|
|
|
|
|
|
class Generator(torch.nn.Module): |
|
|
def __init__(self, |
|
|
style_dim, |
|
|
resblock_kernel_sizes, |
|
|
upsample_rates, |
|
|
upsample_initial_channel, |
|
|
resblock_dilation_sizes, |
|
|
upsample_kernel_sizes): |
|
|
super(Generator, self).__init__() |
|
|
self.num_kernels = len(resblock_kernel_sizes) |
|
|
self.num_upsamples = len(upsample_rates) |
|
|
self.m_source = SourceModuleHnNSF() |
|
|
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) |
|
|
self.noise_convs = nn.ModuleList() |
|
|
self.ups = nn.ModuleList() |
|
|
self.noise_res = nn.ModuleList() |
|
|
|
|
|
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): |
|
|
c_cur = upsample_initial_channel // (2 ** (i + 1)) |
|
|
|
|
|
self.ups.append(weight_norm(ConvTranspose1d(upsample_initial_channel//(2**i), |
|
|
upsample_initial_channel//( |
|
|
2**(i+1)), |
|
|
k, u, padding=(u//2 + u % 2), output_padding=u % 2))) |
|
|
|
|
|
if i + 1 < len(upsample_rates): |
|
|
stride_f0 = np.prod(upsample_rates[i + 1:]) |
|
|
self.noise_convs.append(Conv1d( |
|
|
1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2)) |
|
|
self.noise_res.append(AdaINResBlock1( |
|
|
c_cur, 7, [1, 3, 5], style_dim)) |
|
|
else: |
|
|
self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) |
|
|
self.noise_res.append(AdaINResBlock1( |
|
|
c_cur, 11, [1, 3, 5], style_dim)) |
|
|
|
|
|
self.resblocks = nn.ModuleList() |
|
|
|
|
|
self.alphas = nn.ParameterList() |
|
|
self.alphas.append(nn.Parameter( |
|
|
torch.ones(1, upsample_initial_channel, 1))) |
|
|
|
|
|
for i in range(len(self.ups)): |
|
|
ch = upsample_initial_channel//(2**(i+1)) |
|
|
self.alphas.append(nn.Parameter(torch.ones(1, ch, 1))) |
|
|
|
|
|
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): |
|
|
self.resblocks.append(AdaINResBlock1(ch, k, d, style_dim)) |
|
|
|
|
|
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) |
|
|
|
|
|
def forward(self, x, s, f0): |
|
|
|
|
|
|
|
|
f0 = self.f0_upsamp(f0).transpose(1, 2) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
har_source = self.m_source(f0) |
|
|
|
|
|
har_source = har_source.transpose(1, 2) |
|
|
|
|
|
for i in range(self.num_upsamples): |
|
|
|
|
|
x = x + (1 / self.alphas[i]) * (torch.sin(self.alphas[i] * x) ** 2) |
|
|
x_source = self.noise_convs[i](har_source) |
|
|
x_source = self.noise_res[i](x_source, s) |
|
|
|
|
|
x = self.ups[i](x) |
|
|
|
|
|
x = x + x_source |
|
|
|
|
|
xs = None |
|
|
for j in range(self.num_kernels): |
|
|
|
|
|
if xs is None: |
|
|
xs = self.resblocks[i*self.num_kernels+j](x, s) |
|
|
else: |
|
|
xs += self.resblocks[i*self.num_kernels+j](x, s) |
|
|
x = xs / self.num_kernels |
|
|
|
|
|
x = self.conv_post(x) |
|
|
x = torch.tanh(x) |
|
|
|
|
|
return x |
|
|
|
|
|
class AdainResBlk1d(nn.Module): |
|
|
|
|
|
|
|
|
|
|
|
def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2), |
|
|
upsample='none', dropout_p=0.0): |
|
|
super().__init__() |
|
|
self.actv = actv |
|
|
self.upsample_type = upsample |
|
|
self.upsample = UpSample1d(upsample) |
|
|
self.learned_sc = dim_in != dim_out |
|
|
self._build_weights(dim_in, dim_out, style_dim) |
|
|
if upsample == 'none': |
|
|
self.pool = nn.Identity() |
|
|
else: |
|
|
self.pool = weight_norm(nn.ConvTranspose1d( |
|
|
dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1)) |
|
|
|
|
|
def _build_weights(self, dim_in, dim_out, style_dim): |
|
|
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1)) |
|
|
self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1)) |
|
|
self.norm1 = AdaIN1d(style_dim, dim_in) |
|
|
self.norm2 = AdaIN1d(style_dim, dim_out) |
|
|
if self.learned_sc: |
|
|
self.conv1x1 = weight_norm( |
|
|
nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False)) |
|
|
|
|
|
def _shortcut(self, x): |
|
|
x = self.upsample(x) |
|
|
if self.learned_sc: |
|
|
x = self.conv1x1(x) |
|
|
return x |
|
|
|
|
|
def _residual(self, x, s): |
|
|
x = self.norm1(x, s) |
|
|
x = self.actv(x) |
|
|
x = self.pool(x) |
|
|
x = self.conv1(x) |
|
|
x = self.norm2(x, s) |
|
|
x = self.actv(x) |
|
|
x = self.conv2(x) |
|
|
return x |
|
|
|
|
|
def forward(self, x, s): |
|
|
out = self._residual(x, s) |
|
|
out = (out + self._shortcut(x)) / math.sqrt(2) |
|
|
return out |
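
# Residual and shortcut are summed and scaled by 1/sqrt(2) to keep the
# output variance comparable to the input's.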
|
|
|
|
|
|
|
|
class UpSample1d(nn.Module): |
|
|
def __init__(self, layer_type): |
|
|
super().__init__() |
|
|
self.layer_type = layer_type |
|
|
|
|
|
def forward(self, x): |
|
|
if self.layer_type == 'none': |
|
|
return x |
|
|
else: |
|
|
return F.interpolate(x, scale_factor=2, mode='nearest-exact') |
|
|
|
|
|
|
|
|
class Decoder(nn.Module): |
|
|
def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80, |
|
|
resblock_kernel_sizes=[3, 7, 11], |
|
|
upsample_rates=[10, 5, 3, 2], |
|
|
upsample_initial_channel=512, |
|
|
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], |
|
|
upsample_kernel_sizes=[20, 10, 6, 4]): |
|
|
super().__init__() |
|
|
|
|
|
self.decode = nn.ModuleList() |
|
|
|
|
|
self.encode = AdainResBlk1d(dim_in + 2, 1024, style_dim) |
|
|
|
|
|
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) |
|
|
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) |
|
|
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim)) |
|
|
self.decode.append(AdainResBlk1d( |
|
|
1024 + 2 + 64, 512, style_dim, upsample=True)) |
|
|
|
|
|
self.F0_conv = weight_norm( |
|
|
nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) |
|
|
|
|
|
self.N_conv = weight_norm( |
|
|
nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1)) |
|
|
|
|
|
self.asr_res = nn.Sequential( |
|
|
weight_norm(nn.Conv1d(512, 64, kernel_size=1)), |
|
|
) |
|
|
|
|
|
self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates, |
|
|
upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes) |
|
|
|
|
|
def forward(self, asr=None, F0_curve=None, N=None, s=None): |
|
|
|
|
|
|
|
|
F0 = self.F0_conv(F0_curve) |
|
|
N = self.N_conv(N) |
|
|
|
|
|
|
|
|
x = torch.cat([asr, F0, N], axis=1) |
|
|
|
|
|
x = self.encode(x, s) |
|
|
|
|
|
asr_res = self.asr_res(asr) |
|
|
|
|
|
        res = True
        for block in self.decode:
            if res:
                # Re-inject the ASR residual and the F0/N curves until the
                # first upsampling block changes the temporal resolution.
                x = torch.cat([x, asr_res, F0, N], axis=1)
            x = block(x, s)
            if block.upsample_type != "none":
                res = False
|
|
|
|
|
x = self.generator(x, s, F0_curve) |
|
|
return x |
|
|
|
|
|
|
|
|
class MelSpec(torch.nn.Module): |
|
|
|
|
|
    def __init__(self,
                 sample_rate=24000,
                 n_fft=2048,
                 win_length=1200,
                 hop_length=300,
                 n_mels=80
                 ):
|
|
'''avoids dependency on torchaudio''' |
|
|
super().__init__() |
|
|
self.n_fft = n_fft |
|
|
self.win_length = win_length if win_length is not None else n_fft |
|
|
self.hop_length = hop_length if hop_length is not None else self.win_length // 2 |
|
|
|
|
|
f_min = 0.0 |
|
|
f_max = float(sample_rate // 2) |
|
|
all_freqs = torch.linspace(0, sample_rate // 2, n_fft//2+1) |
|
|
m_min = 2595.0 * math.log10(1.0 + (f_min / 700.0)) |
|
|
m_max = 2595.0 * math.log10(1.0 + (f_max / 700.0)) |
|
|
m_pts = torch.linspace(m_min, m_max, n_mels + 2) |
|
|
f_pts = 700.0 * (10 ** (m_pts / 2595.0) - 1.0) |
|
|
f_diff = f_pts[1:] - f_pts[:-1] |
|
|
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) |
|
|
zero = torch.zeros(1) |
|
|
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] |
|
|
up_slopes = slopes[:, 2:] / f_diff[1:] |
|
|
fb = torch.max(zero, torch.min(down_slopes, up_slopes)) |
|
|
|
|
|
self.register_buffer('fb', fb, persistent=False) |
|
|
window = torch.hann_window(self.win_length) |
|
|
self.register_buffer('window', window, persistent=False) |
|
|
|
|
|
def forward(self, x): |
|
|
spec_f = torch.stft(x, |
|
|
self.n_fft, |
|
|
self.hop_length, |
|
|
self.win_length, |
|
|
self.window, |
|
|
center=True, |
|
|
pad_mode="reflect", |
|
|
normalized=False, |
|
|
onesided=True, |
|
|
return_complex=True) |
|
|
mel_specgram = torch.matmul(spec_f.abs().pow(2).transpose(1, 2), self.fb).transpose(1, 2) |
|
|
return mel_specgram[:, None, :, :] |
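
# Shape sketch: MelSpec()(torch.zeros(1, 24000)) -> [1, 1, 80, 81]
# (80 mel bands; hop 300 with center padding gives 24000/300 + 1 frames).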
|
|
|
|
|
|
|
|
class LearnedDownSample(nn.Module): |
|
|
def __init__(self, dim_in): |
|
|
super().__init__() |
|
|
self.conv = spectral_norm(nn.Conv2d(dim_in, dim_in, kernel_size=( |
|
|
3, 3), stride=(2, 2), groups=dim_in, padding=1)) |
|
|
|
|
|
def forward(self, x): |
|
|
return self.conv(x) |
|
|
|
|
|
|
|
|
class ResBlk(nn.Module): |
|
|
def __init__(self, |
|
|
dim_in, dim_out): |
|
|
super().__init__() |
|
|
self.actv = nn.LeakyReLU(0.2) |
|
|
self.downsample_res = LearnedDownSample(dim_in) |
|
|
self.learned_sc = dim_in != dim_out |
|
|
self.conv1 = spectral_norm(nn.Conv2d(dim_in, dim_in, 3, 1, 1)) |
|
|
self.conv2 = spectral_norm(nn.Conv2d(dim_in, dim_out, 3, 1, 1)) |
|
|
if self.learned_sc: |
|
|
self.conv1x1 = spectral_norm( |
|
|
nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)) |
|
|
|
|
|
def _shortcut(self, x): |
|
|
if self.learned_sc: |
|
|
x = self.conv1x1(x) |
|
|
if x.shape[3] % 2 != 0: |
|
|
x = torch.cat([x, x[:, :, :, -1:]], dim=3) |
|
|
return F.interpolate(x, scale_factor=.5, mode='nearest-exact') |
|
|
|
|
|
def _residual(self, x): |
|
|
x = self.actv(x) |
|
|
x = self.conv1(x) |
|
|
x = self.downsample_res(x) |
|
|
x = self.actv(x) |
|
|
x = self.conv2(x) |
|
|
return x |
|
|
|
|
|
def forward(self, x): |
|
|
x = self._shortcut(x) + self._residual(x) |
|
|
return x / math.sqrt(2) |
|
|
|
|
|
|
|
|
class StyleEncoder(nn.Module): |
|
|
|
|
|
|
|
|
|
|
|
def __init__(self, |
|
|
dim_in=64, |
|
|
style_dim=128, |
|
|
max_conv_dim=512): |
|
|
super().__init__() |
|
|
blocks = [spectral_norm(nn.Conv2d(1, dim_in, 3, stride=1, padding=1))] |
|
|
for _ in range(4): |
|
|
dim_out = min(dim_in * 2, |
|
|
max_conv_dim) |
|
|
blocks += [ResBlk(dim_in, dim_out)] |
|
|
dim_in = dim_out |
|
|
        blocks += [nn.LeakyReLU(0.2),
                   spectral_norm(nn.Conv2d(dim_out, dim_out, 5, stride=1, padding=0)),
                   nn.LeakyReLU(0.2)
                   ]
|
|
self.shared = nn.Sequential(*blocks) |
|
|
self.unshared = nn.Linear(dim_out, style_dim) |
|
|
|
|
|
def forward(self, x): |
|
|
x = self.shared(x) |
|
|
        x = x.mean(3, keepdim=True)
|
|
x = x.transpose(1, 3) |
|
|
s = self.unshared(x) |
|
|
return s |
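
# Shape sketch: log-mel [B, 1, 80, T] -> style [B, 1, 1, 128]; four ResBlks
# halve both axes, the 5x5 valid conv collapses frequency, mean pools time.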
|
|
|
|
|
|
|
|
class LinearNorm(torch.nn.Module): |
|
|
def __init__(self, in_dim, out_dim, bias=True): |
|
|
super().__init__() |
|
|
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias) |
|
|
|
|
|
def forward(self, x): |
|
|
return self.linear_layer(x) |
|
|
|
|
|
|
|
|
class LayerNorm(nn.Module): |
|
|
def __init__(self, channels, eps=1e-5): |
|
|
super().__init__() |
|
|
self.channels = channels |
|
|
self.eps = eps |
|
|
|
|
|
self.gamma = nn.Parameter(torch.ones(channels)) |
|
|
self.beta = nn.Parameter(torch.zeros(channels)) |
|
|
|
|
|
def forward(self, x): |
|
|
x = x.transpose(1, -1) |
|
|
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) |
|
|
return x.transpose(1, -1) |
|
|
|
|
|
|
|
|
class TextEncoder(nn.Module): |
|
|
def __init__(self, channels, kernel_size, depth, n_symbols): |
|
|
super().__init__() |
|
|
self.embedding = nn.Embedding(n_symbols, channels) |
|
|
padding = (kernel_size - 1) // 2 |
|
|
self.cnn = nn.ModuleList() |
|
|
for _ in range(depth): |
|
|
self.cnn.append(nn.Sequential( |
|
|
weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)), |
|
|
LayerNorm(channels), |
|
|
                nn.LeakyReLU(0.2))
|
|
) |
|
|
self.lstm = nn.LSTM(channels, channels//2, 1, |
|
|
batch_first=True, bidirectional=True) |
|
|
|
|
|
def forward(self, x): |
|
|
x = self.embedding(x) |
|
|
x = x.transpose(1, 2) |
|
|
for c in self.cnn: |
|
|
x = c(x) |
|
|
x = x.transpose(1, 2) |
|
|
x, _ = self.lstm(x) |
|
|
return x |
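
# Shape sketch: token ids [B, N] -> hidden states [B, N, 512]
# (embedding, three conv blocks, then a bidirectional LSTM).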
|
|
|
|
|
|
|
|
class AdaLayerNorm(nn.Module):
    # Style-conditioned LayerNorm, hard-coded to d_model = 512: the style
    # projection yields gamma (first 512 dims) and beta (last 512 dims).
    # `channels` is kept for signature compatibility but unused.

    def __init__(self, style_dim, channels=None, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.fc = nn.Linear(style_dim, 1024)

    def forward(self, x, s):
        h = self.fc(s)
        gamma = h[:, :, :512]
        beta = h[:, :, 512:1024]
        x = F.layer_norm(x, (512,), eps=self.eps)
        x = (1 + gamma) * x + beta
        return x
|
|
|
|
|
|
|
|
class ProsodyPredictor(nn.Module): |
|
|
|
|
|
def __init__(self, style_dim, d_hid, nlayers, max_dur=50): |
|
|
super().__init__() |
|
|
|
|
|
self.text_encoder = DurationEncoder(sty_dim=style_dim, |
|
|
d_model=d_hid, |
|
|
nlayers=nlayers) |
|
|
self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, |
|
|
1, batch_first=True, bidirectional=True) |
|
|
self.duration_proj = LinearNorm(d_hid, max_dur) |
|
|
self.shared = nn.LSTM(d_hid + style_dim, d_hid // |
|
|
2, 1, batch_first=True, bidirectional=True) |
|
|
self.F0 = nn.ModuleList([ |
|
|
AdainResBlk1d(d_hid, d_hid, style_dim), |
|
|
AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True), |
|
|
AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim), |
|
|
]) |
|
|
self.N = nn.ModuleList([ |
|
|
AdainResBlk1d(d_hid, d_hid, style_dim), |
|
|
AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True), |
|
|
AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim) |
|
|
]) |
|
|
self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0) |
|
|
self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0) |
|
|
|
|
|
def F0Ntrain(self, x, s): |
|
|
|
|
|
x, _ = self.shared(x) |
|
|
|
|
|
x = x.transpose(1, 2) |
|
|
|
|
|
F0 = x |
|
|
|
|
|
for block in self.F0: |
|
|
|
|
|
|
|
|
|
|
|
F0 = block(F0, s) |
|
|
F0 = self.F0_proj(F0) |
|
|
|
|
|
N = x |
|
|
|
|
|
for block in self.N: |
|
|
N = block(N, s) |
|
|
N = self.N_proj(N) |
|
|
|
|
|
return F0, N |
|
|
|
|
|
def forward(self, d_en=None, s=None): |
|
|
blend = self.text_encoder(d_en, s) |
|
|
x, _ = self.lstm(blend) |
|
|
dur = self.duration_proj(x) |
|
|
|
|
|
        _, input_length, _ = dur.shape  # [batch, n_tokens, max_dur bins]

        # Expected duration per token: sum of per-bin sigmoids, >= 1 frame.
        dur = dur[0, :, :]
        dur = torch.sigmoid(dur).sum(1)
        dur = dur.round().clamp(min=1).to(torch.int64)
|
|
aln_trg = torch.zeros(1, |
|
|
dur.sum(), |
|
|
input_length, |
|
|
device=s.device) |
|
|
c_frame = 0 |
|
|
for i in range(input_length): |
|
|
aln_trg[:, c_frame:c_frame + dur[i], i] = 1 |
|
|
c_frame += dur[i] |
|
|
en = torch.bmm(aln_trg, blend) |
|
|
F0_pred, N_pred = self.F0Ntrain(en, s) |
|
|
return aln_trg, F0_pred, N_pred |
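
# Alignment sketch: token durations [2, 3] produce a [1, 5, 2] one-hot
# aln_trg that holds token 0 for frames 0-1 and token 1 for frames 2-4.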
|
|
|
|
|
|
|
|
class DurationEncoder(nn.Module): |
|
|
|
|
|
def __init__(self, sty_dim=128, d_model=512, nlayers=3): |
|
|
super().__init__() |
|
|
self.lstms = nn.ModuleList() |
|
|
for _ in range(nlayers): |
|
|
self.lstms.append(nn.LSTM(d_model + sty_dim, |
|
|
d_model // 2, |
|
|
num_layers=1, |
|
|
batch_first=True, |
|
|
bidirectional=True |
|
|
)) |
|
|
self.lstms.append(AdaLayerNorm(sty_dim, d_model)) |
|
|
|
|
|
|
|
|
    def forward(self, x, style):

        # Broadcast the style vector along time; it is re-concatenated
        # before every LSTM and fed to every AdaLayerNorm.
        style = _tile(style, length=x.shape[2]).transpose(1, 2)
        x = x.transpose(1, 2)

        for block in self.lstms:
            if isinstance(block, AdaLayerNorm):
                x = block(x, style)
            else:
                x = torch.cat([x, style], axis=2)
                x, _ = block(x)

        return torch.cat([x, style], axis=2)
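

if __name__ == "__main__":
    # Minimal end-to-end sketch. 'ref.wav' and 'out.wav' are illustrative
    # file names, not assets shipped with this module.
    tts = StyleTTS2()
    style = tts.compute_style('ref.wav')
    wav = tts.inference('Hello world.', ref_s=style)  # [1, 1, n_samples]
    audiofile.write('out.wav', wav[0].cpu().numpy(), 24000)  # 24 kHz mono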
|
|
|