# xref: /aosp_15_r20/external/libopus/dnn/torch/lossgen/train_lossgen.py (revision a58d3d2adb790c104798cd88c8a3aff4fa8b82cc)
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import tqdm
from scipy.signal import lfilter
import os
import lossgen
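
# Training script for the neural packet-loss generator (lossgen.LossGen).
# The model learns to predict the next packet-loss flag from past loss flags and a
# slowly varying smoothed loss percentage, trained with a binary cross-entropy loss
# on sequences read from a plain-text loss trace (presumably one 0/1 flag per
# packet, loaded with np.loadtxt below).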

class LossDataset(torch.utils.data.Dataset):
    def __init__(self,
                loss_file,
                sequence_length=997):

        self.sequence_length = sequence_length

        self.loss = np.loadtxt(loss_file, dtype='float32')

        # Split the trace into fixed-length sequences, dropping any remainder.
        self.nb_sequences = self.loss.shape[0]//self.sequence_length
        self.loss = self.loss[:self.nb_sequences*self.sequence_length]
        # Smoothed loss percentage via a first-order IIR filter:
        # perc[n] = 0.999*perc[n-1] + 0.001*loss[n]
        self.perc = lfilter(np.array([.001], dtype='float32'), np.array([1., -.999], dtype='float32'), self.loss)

        self.loss = np.reshape(self.loss, (self.nb_sequences, self.sequence_length, 1))
        self.perc = np.reshape(self.perc, (self.nb_sequences, self.sequence_length, 1))

    def __len__(self):
        return self.nb_sequences

    def __getitem__(self, index):
        # Randomly perturb the smoothed loss percentage (a per-sequence offset r0 plus
        # per-sample noise r1) as data augmentation; the perc*(1-perc) factor keeps the
        # perturbation small near 0 and 1.
        r0 = np.random.normal(scale=.1, size=(1,1)).astype('float32')
        r1 = np.random.normal(scale=.1, size=(self.sequence_length,1)).astype('float32')
        perc = self.perc[index, :, :]
        perc = perc + (r0+r1)*perc*(1-perc)
        return [self.loss[index, :, :], perc]
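
# A minimal usage sketch (assuming a 'loss_sorted.txt' trace is available): each
# item is a pair of float32 arrays of shape (sequence_length, 1), e.g.
#
#   ds = LossDataset('loss_sorted.txt')
#   loss_seq, perc_seq = ds[0]   # both have shape (997, 1)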


# Training hyper-parameters
adam_betas = [0.8, 0.98]
adam_eps = 1e-8
batch_size = 256
lr_decay = 0.001
lr = 0.003
epsilon = 1e-5  # numerical-stability constant for the log terms in the BCE loss
epochs = 2000
checkpoint_dir = 'checkpoint'
os.makedirs(checkpoint_dir, exist_ok=True)
checkpoint = dict()

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Model hyper-parameters are stored in the checkpoint so the model can be
# re-instantiated from the saved file.
checkpoint['model_args']    = ()
checkpoint['model_kwargs']  = {'gru1_size': 16, 'gru2_size': 32}
model = lossgen.LossGen(*checkpoint['model_args'], **checkpoint['model_kwargs'])
dataset = LossDataset('loss_sorted.txt')
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=4)
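# Each batch is a (loss, perc) pair of float32 tensors of shape
# (batch_size, sequence_length, 1); drop_last=True keeps the batch size fixed, so
# the recurrent states carried across batches keep a consistent shape.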


optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=adam_betas, eps=adam_eps)


# learning rate scheduler
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=lambda x : 1 / (1 + lr_decay * x))
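# With this lambda, the effective learning rate after n scheduler steps is
# lr / (1 + lr_decay * n); scheduler.step() is called once per batch below, so the
# decay is driven by the batch count rather than the epoch count.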


if __name__ == '__main__':
    model.to(device)
    states = None
    for epoch in range(1, epochs + 1):

        running_loss = 0

        print(f"training epoch {epoch}...")
        with tqdm.tqdm(dataloader, unit='batch') as tepoch:
            for i, (loss, perc) in enumerate(tepoch):
                optimizer.zero_grad()
                loss = loss.to(device)
                perc = perc.to(device)

                # Run the model on the whole sequence; the recurrent state is carried
                # across batches but detached so gradients stay within the current batch.
                out, states = model(loss, perc, states=states)
                states = [state.detach() for state in states]
                # The output at time t predicts the loss flag at time t+1.
                out = torch.sigmoid(out[:,:-1,:])
                target = loss[:,1:,:]

                # Binary cross-entropy; epsilon keeps the log arguments away from zero.
                bce_loss = torch.mean(-target*torch.log(out+epsilon) - (1-target)*torch.log(1-out+epsilon))

                bce_loss.backward()
                optimizer.step()

                scheduler.step()

                running_loss += bce_loss.detach().cpu().item()
                tepoch.set_postfix(loss=f"{running_loss/(i+1):8.5f}")

        # save checkpoint
        checkpoint_path = os.path.join(checkpoint_dir, f'lossgen_{epoch}.pth')
        checkpoint['state_dict'] = model.state_dict()
        checkpoint['loss'] = running_loss / len(dataloader)
        checkpoint['epoch'] = epoch
        torch.save(checkpoint, checkpoint_path)
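
# Sketch of how a saved checkpoint could be reloaded for inference (the file name
# below is illustrative):
#
#   ckpt = torch.load('checkpoint/lossgen_2000.pth', map_location='cpu')
#   model = lossgen.LossGen(*ckpt['model_args'], **ckpt['model_kwargs'])
#   model.load_state_dict(ckpt['state_dict'])
#   model.eval()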