pytorch task04动手学pytorch-机器翻译

Celeste ·
更新时间:2024-09-21
· 552 次阅读

pytorch task04动手学pytorch-机器翻译 文章目录pytorch task04动手学pytorch-机器翻译1. 机器翻译与数据集1.1数据集预处理1.2 创建dataloader2. Encoder Decoder3. Sequence to Sequence3.1 结构3.2 代码实现4. 实验5.注意力机制5.1注意力机制框架5.2 实验部分 1. 机器翻译与数据集

机器翻译(MT):将一段文本从一种语言自动翻译为另一种语言,用神经网络解决这个问题通常称为神经机器翻译(NMT)。
主要特征:输出是单词序列而不是单个单词。 输出序列的长度可能与源序列的长度不同。
数据集采用 http://www.manythings.org/anki/ 的fra-eng数据集

# 1.1 Dataset preprocessing
# Data dictionary: char/token to index and index to token.
class Vocab(object):
    """Bidirectional token <-> index mapping built from a list of tokens.

    NOTE(review): the special-token string literals were lost in the
    original post (they appeared as four empty strings, which would all
    collide on one dict key); restored here as the conventional d2l names
    '<pad>', '<bos>', '<eos>', '<unk>'.
    """

    def __init__(self, tokens, min_freq=0, use_special_tokens=False):
        counter = collections.Counter(tokens)
        self.token_freqs = list(counter.items())
        self.idx_to_token = []
        if use_special_tokens:
            # padding, begin of sentence, end of sentence, unknown
            self.pad, self.bos, self.eos, self.unk = (0, 1, 2, 3)
            self.idx_to_token += ['<pad>', '<bos>', '<eos>', '<unk>']
        else:
            self.unk = 0
            self.idx_to_token += ['<unk>']
        # Track membership in a set: the original `token not in list` scan
        # made vocabulary construction O(n^2).
        seen = set(self.idx_to_token)
        for token, freq in self.token_freqs:
            if freq >= min_freq and token not in seen:
                self.idx_to_token.append(token)
                seen.add(token)
        self.token_to_idx = {token: idx
                             for idx, token in enumerate(self.idx_to_token)}

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or a nested list of tokens) to indices; unknown -> unk."""
        if not isinstance(tokens, (list, tuple)):
            return self.token_to_idx.get(tokens, self.unk)
        return [self.__getitem__(token) for token in tokens]

    def to_tokens(self, indices):
        """Inverse of __getitem__: map indices back to token strings."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]


# Data cleaning, tokenization, and vocabulary construction.
class TextPreprocessor():
    """Clean raw bilingual text, tokenize it and build source/target vocabs."""

    def __init__(self, text, num_lines):
        self.num_lines = num_lines
        text = self.clean_raw_text(text)
        self.src_tokens, self.tar_tokens = self.tokenize(text)
        self.src_vocab = self.build_vocab(self.src_tokens)
        self.tar_vocab = self.build_vocab(self.tar_tokens)

    def clean_raw_text(self, text):
        # Normalize non-breaking spaces, lower-case everything, and make
        # sure , ! . are separated from the preceding word by a space.
        text = text.replace('\u202f', ' ').replace('\xa0', ' ')
        out = ''
        for i, char in enumerate(text.lower()):
            if char in (',', '!', '.') and i > 0 and text[i - 1] != ' ':
                out += ' '
            out += char
        return out

    def tokenize(self, text):
        """Split into (source, target) word lists, one pair per tab-separated line."""
        sources, targets = [], []
        for i, line in enumerate(text.split('\n')):
            # Was `i > self.num_lines`: an off-by-one that read one extra line.
            if i >= self.num_lines:
                break
            parts = line.split('\t')
            if len(parts) >= 2:
                sources.append(parts[0].split(' '))
                targets.append(parts[1].split(' '))
        return sources, targets

    def build_vocab(self, tokens):
        # Flatten the token lines before counting frequencies.
        tokens = [token for line in tokens for token in line]
        return Vocab(tokens, min_freq=3, use_special_tokens=True)


# 1.2 Build the dataloader
# Pad the sequences, build the dataset, create the dataloader.
class TextUtil():
    """Pad/clip token lines to a fixed length and expose a DataLoader."""

    def __init__(self, tp, max_len):
        self.src_vocab, self.tar_vocab = tp.src_vocab, tp.tar_vocab
        src_arr, src_valid_len = self.build_array(
            tp.src_tokens, tp.src_vocab, max_len=max_len,
            padding_token=tp.src_vocab.pad, is_source=True)
        tar_arr, tar_valid_len = self.build_array(
            tp.tar_tokens, tp.tar_vocab, max_len=max_len,
            padding_token=tp.tar_vocab.pad, is_source=False)
        self.dataset = torch.utils.data.TensorDataset(
            src_arr, src_valid_len, tar_arr, tar_valid_len)

    def build_array(self, lines, vocab, max_len, padding_token, is_source):
        """Convert token lines to an (N, max_len) index tensor + valid lengths."""
        def _pad(line):
            # Clip long lines, pad short ones with `padding_token`.
            if len(line) > max_len:
                return line[:max_len]
            return line + (max_len - len(line)) * [padding_token]

        lines = [vocab[line] for line in lines]
        if not is_source:
            # Target sentences are wrapped as <bos> ... <eos>.
            lines = [[vocab.bos] + line + [vocab.eos] for line in lines]
        arr = torch.tensor([_pad(line) for line in lines])
        valid_len = (arr != vocab.pad).sum(1)  # count of non-padding entries
        return arr, valid_len

    def load_data_nmt(self, batch_size):
        train_loader = torch.utils.data.DataLoader(
            self.dataset, batch_size, shuffle=True)
        return self.src_vocab, self.tar_vocab, train_loader


# 2. Encoder-Decoder

encoder:输入到隐藏状态
decoder:隐藏状态到输出

Image Name

3. Sequence to Sequence 3.1 结构

训练
Image Name
预测

Image Name

具体结构:
Image Name

# 3.2 Implementation

class Encoder(nn.Module):
    """Abstract encoder: maps an input sequence to an encoded state."""

    def __init__(self, **kwargs):
        super(Encoder, self).__init__(**kwargs)

    def forward(self, X, *args):
        raise NotImplementedError


class Decoder(nn.Module):
    """Abstract decoder: consumes the encoder state and emits the outputs."""

    def __init__(self, **kwargs):
        super(Decoder, self).__init__(**kwargs)

    def init_state(self, encoded_state, *args):
        raise NotImplementedError

    def forward(self, X, state):
        raise NotImplementedError


class EncoderDecoder(nn.Module):
    """Glue module: encode `enc_X`, then decode `dec_X` from the encoder state."""

    def __init__(self, encoder, decoder, **kwargs):
        super(EncoderDecoder, self).__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # The encoder returns (outputs, state); only the state seeds the decoder.
        encoded_state = self.encoder(enc_X, *args)[1]
        decoded_state = self.decoder.init_state(encoded_state, *args)
        return self.decoder(dec_X, decoded_state)


class Seq2SeqEncoder(Encoder):
    """LSTM encoder: embedding followed by a multi-layer LSTM."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqEncoder, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.num_layers = num_layers
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.LSTM(embed_size, num_hiddens, num_layers, dropout=dropout)

    def begin_state(self, batch_size, device):
        # Zero-initialized (H, C) pair, shape (num_layers, batch, num_hiddens).
        H = torch.zeros(size=(self.num_layers, batch_size, self.num_hiddens),
                        device=device)
        C = torch.zeros(size=(self.num_layers, batch_size, self.num_hiddens),
                        device=device)
        return (H, C)

    def forward(self, X, *args):
        X = self.embedding(X)     # (batch, seq) -> (batch, seq, embed)
        X = X.transpose(0, 1)     # nn.LSTM expects (seq, batch, embed)
        out, state = self.rnn(X)  # initial state defaults to zeros when omitted
        return out, state


class Seq2SeqDecoder(Decoder):
    """LSTM decoder: embedding -> LSTM (seeded by encoder state) -> linear head."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super(Seq2SeqDecoder, self).__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.LSTM(embed_size, num_hiddens, num_layers, dropout=dropout)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, encoded_state, *args):
        # The encoder's final (H, C) is used directly as the decoder's state.
        return encoded_state

    def forward(self, X, state):
        X = self.embedding(X).transpose(0, 1)
        out, state = self.rnn(X, state)
        # Project to vocabulary logits, back to (batch, seq, vocab).
        out = self.dense(out).transpose(0, 1)
        return out, state


def grad_clipping(params, theta, device):
    """Clip the global L2 norm of the gradients of `params` to at most `theta`."""
    # Materialize `params`: it may be a generator (e.g. model.parameters()),
    # and the original code iterated it twice — the second loop saw an
    # exhausted iterator, so the clipping silently never happened.
    params = list(params)
    norm = torch.tensor([0], dtype=torch.float32, device=device)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().item()
    if norm > theta:
        for param in params:
            param.grad.data.mul_(theta / norm)


def grad_clipping_nn(model, theta, device):
    """Clip the gradient for a nn model."""
    grad_clipping(model.parameters(), theta, device)


class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
    """Cross-entropy that ignores positions beyond each sequence's valid length."""

    def get_mask(self, X, valid_len, value=0):
        """Set X[i, j] = `value` for j >= valid_len[i] (in place) and return X."""
        max_len = X.size(1)
        mask = (torch.arange(max_len)[None, :].to(valid_len.device)
                < valid_len[:, None])
        X[~mask] = value
        return X

    def forward(self, pred, label, valid_len):
        # pred: (batch, seq, vocab); label: (batch, seq); valid_len: (batch,).
        weights = torch.ones_like(label)
        weights = self.get_mask(weights, valid_len)
        self.reduction = 'none'  # keep per-position losses so we can mask them
        output = super(MaskedSoftmaxCELoss, self).forward(
            pred.transpose(1, 2), label)
        return (output * weights).mean(dim=1)


# 4. Experiment

def train(model, data_iter, lr, num_epochs, device):
    """Train an encoder-decoder model with masked CE loss and grad clipping."""
    model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    loss = MaskedSoftmaxCELoss()
    tic = time.time()
    for epoch in range(1, num_epochs + 1):
        l_sum, num_tokens_sum = 0.0, 0.0
        for batch in data_iter:
            optimizer.zero_grad()
            X, X_vlen, Y, Y_vlen = [x.to(device) for x in batch]
            # Teacher forcing: input is Y[:-1], label the one-step shift Y[1:].
            Y_input, Y_label, Y_vlen = Y[:, :-1], Y[:, 1:], Y_vlen - 1
            Y_hat, _ = model(X, Y_input, X_vlen, Y_vlen)
            l = loss(Y_hat, Y_label, Y_vlen).sum()
            l.backward()
            with torch.no_grad():
                grad_clipping_nn(model, 5, device)
            num_tokens = Y_vlen.sum().item()
            optimizer.step()
            l_sum += l.item()  # `l` is already a scalar; `.sum()` was redundant
            num_tokens_sum += num_tokens
        if epoch % 10 == 0:
            print("epoch {0:4d},loss {1:.3f}, time {2:.1f} sec".format(
                epoch, (l_sum / num_tokens_sum), time.time() - tic))
            tic = time.time()


def translate(model, src_sentence, src_vocab, tgt_vocab, max_len, device):
    """Greedily decode one source sentence into the target language.

    NOTE(review): the body of this function was garbled during extraction
    in the original post; it is reconstructed here following the standard
    d2l `translate_ch7` implementation — confirm against the original
    notebook.
    """
    src_tokens = src_vocab[src_sentence.lower().split(' ')]
    src_len = len(src_tokens)
    if src_len < max_len:
        src_tokens += [src_vocab.pad] * (max_len - src_len)
    enc_X = torch.tensor(src_tokens, device=device)
    # Batch of one sentence; only the encoder state seeds the decoder.
    enc_out, enc_state = model.encoder(enc_X.unsqueeze(dim=0))
    dec_state = model.decoder.init_state(enc_state)
    dec_X = torch.tensor([tgt_vocab.bos], device=device).unsqueeze(dim=0)
    predict_tokens = []
    for _ in range(max_len):
        Y, dec_state = model.decoder(dec_X, dec_state)
        # Greedy decoding: feed the argmax token back in at the next step.
        dec_X = Y.argmax(dim=2)
        py = dec_X.squeeze(dim=0).int().item()
        if py == tgt_vocab.eos:
            break
        predict_tokens.append(py)
    return ' '.join(tgt_vocab.to_tokens(predict_tokens))

# Demo from the original post (its `translate_ch7` is `translate` here):
# for sentence in ['Go .', 'Wow !', "I'm OK .", 'I won !']:
#     print(sentence + ' => ' + translate(
#         model, sentence, src_vocab, tgt_vocab, max_len, device))
# Go . => va !
# Wow ! => <unk> !
=> ça va .
I won ! => j'ai gagné !

5. 注意力机制

在“编码器—解码器(seq2seq)”⼀节⾥,解码器在各个时间步依赖相同的背景变量(context vector)来获取输⼊序列信息。当编码器为循环神经⽹络时,背景变量来⾃它最终时间步的隐藏状态。将源序列输入信息以循环单位状态编码,然后将其传递给解码器以生成目标序列。然而这种结构存在着问题,尤其是RNN机制实际中存在长程梯度消失的问题,对于较长的句子,我们很难寄希望于将输入的序列转化为定长的向量而保存所有的有效信息,所以随着所需翻译句子的长度的增加,这种结构的效果会显著下降。

与此同时,解码的目标词语可能只与原输入的部分词语有关,而并不是与所有的输入有关。例如,当把“Hello world”翻译成“Bonjour le monde”时,“Hello”映射成“Bonjour”,“world”映射成“monde”。在seq2seq模型中,解码器只能隐式地从编码器的最终状态中选择相应的信息。然而,注意力机制可以将这种选择过程显式地建模。

Image Name

5.1注意力机制框架

(注意力机制框架示意图:原图未能转存,请参见原文配图 image-20200218160142253.png)

Image Name

5.2 实验部分 import math import torch import torch.nn as nn import os def file_name_walk(file_dir): for root, dirs, files in os.walk(file_dir): # print("root", root) # 当前目录路径 print("dirs", dirs) # 当前路径下所有子目录 print("files", files) # 当前路径下所有非目录子文件 file_name_walk("/home/kesci/input/fraeng6506") dirs [] files ['_about.txt', 'fra.txt']

Softmax屏蔽

在深入研究实现之前,我们首先介绍softmax操作符的一个屏蔽操作。

def SequenceMask(X, X_len,value=-1e6): maxlen = X.size(1) #print(X.size(),torch.arange((maxlen),dtype=torch.float)[None, :],'\n',X_len[:, None] ) mask = torch.arange((maxlen),dtype=torch.float)[None, :] >= X_len[:, None] #print(mask) X[mask]=value return X def masked_softmax(X, valid_length): # X: 3-D tensor, valid_length: 1-D or 2-D tensor softmax = nn.Softmax(dim=-1) if valid_length is None: return softmax(X) else: shape = X.shape if valid_length.dim() == 1: try: valid_length = torch.FloatTensor(valid_length.numpy().repeat(shape[1], axis=0))#[2,2,3,3] except: valid_length = torch.FloatTensor(valid_length.cpu().numpy().repeat(shape[1], axis=0))#[2,2,3,3] else: valid_length = valid_length.reshape((-1,)) # fill masked elements with a large negative, whose exp is 0 X = SequenceMask(X.reshape((-1, shape[-1])), valid_length) return softmax(X).reshape(shape) masked_softmax(torch.rand((2,2,4),dtype=torch.float), torch.FloatTensor([2,3]))

输出结果:

tensor([[[0.5423, 0.4577, 0.0000, 0.0000],
         [0.5290, 0.4710, 0.0000, 0.0000]],

        [[0.2969, 0.2966, 0.4065, 0.0000],
         [0.3607, 0.2203, 0.4190, 0.0000]]])

超出2维矩阵的乘法

(多维矩阵乘法示意图:原图未能转存,请参见原文配图 image-20200218160725722.png)

输入:

torch.bmm(torch.ones((2,1,3), dtype = torch.float), torch.ones((2,3,2), dtype = torch.float))

输出:

tensor([[[3., 3.]],

        [[3., 3.]]])
作者:Tiger歌儿



pytorch 机器翻译

需要 登录 后方可回复, 如果你还没有账号请 注册新账号