• Building a Transformer in PyTorch for English Translation


    # !/usr/bin/env Python3
    # -*- coding: utf-8 -*-
    # @version: v1.0
    # @Author : Meng Li
    # @contact: 925762221@qq.com
    # @FILE : torch_transformer.py
    # @Time : 2022/6/22 15:10
    # @Software : PyCharm
    # @site:
    # @Description : A self-implemented Transformer model based on the Encoder-Decoder architecture
    import torch
    import torch.nn as nn
    from torch.utils.data import Dataset, DataLoader
    import numpy as np
    import math

    torch.backends.cudnn.enabled = False

    dim = 64  # dimension of the Q, K, V matrices per head
    embed_size = 512
    batch_size = 2
    num_heads = 8
    num_layers = 6
    d_ff = 2048  # FeedForward dimension
    dropout = 0.5

    sentences = [
        # enc_input               dec_input            dec_output
        ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],
        ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']
    ]

    # size of the source vocabulary (number of distinct tokens in the enc_input column)
    src_vocab_size = len(set(np.array([i[0].split(" ") for i in sentences]).flatten()))
    # size of the target vocabulary (number of distinct tokens in dec_input + dec_output)
    dst_vocab_size = len(
        set(np.array([i[1].split(" ") for i in sentences] + [i[2].split(" ") for i in sentences]).flatten()))

    class my_dataset(Dataset):
        def __init__(self, enc_inputs, dec_inputs, dec_outputs):
            super(my_dataset, self).__init__()
            self.enc_inputs = enc_inputs
            self.dec_inputs = dec_inputs
            self.dec_outputs = dec_outputs

        def __getitem__(self, index):
            return self.enc_inputs[index], self.dec_inputs[index], self.dec_outputs[index]

        def __len__(self):
            return self.enc_inputs.size(0)  # the first dimension of the tensor, i.e. the number of samples

    def get_attn_pad_mask(seq_q, seq_k):
        """
        :param seq_q: seq_q -> [Batch_size, len_q]
        :param seq_k: seq_k -> [Batch_size, len_k]
        :return:
        """
        Batch_size, len_q = seq_q.size()
        Batch_size, len_k = seq_k.size()
        atten_mask = seq_k.eq(0).unsqueeze(1)  # atten_mask -> [Batch_size, 1, len_k]
        atten_mask = atten_mask.expand(Batch_size, len_q, len_k)  # atten_mask -> [Batch_size, len_q, len_k]
        return atten_mask


    def get_attn_subsequence_mask(seq):
        """
        seq: [batch_size, tgt_len]
        """
        attn_shape = [seq.size(0), seq.size(1), seq.size(1)]
        subsequence_mask = np.triu(np.ones(attn_shape), k=1)  # upper triangular matrix
        subsequence_mask = torch.from_numpy(subsequence_mask).byte()
        return subsequence_mask  # [batch_size, tgt_len, tgt_len]

    def make_data(seq_data):
        """
        :param seq_data:
        :return: three LongTensors: enc_inputs, dec_inputs, dec_outputs, each of shape
                 [Batch_size, seq_len], holding word indices (not embeddings)
        """
        src_vocab = [i[0].split(" ") for i in seq_data]
        src_vocab = set(np.array(src_vocab).flatten())
        target_vocab = [i[1].split(" ") for i in seq_data] + [i[2].split(" ") for i in seq_data]
        target_vocab = set(np.array(target_vocab).flatten())
        enc_input_all, dec_input_all, dec_output_all = [], [], []
        src_word2idx = {j: i for i, j in enumerate(src_vocab)}
        dst_word2idx = {j: i for i, j in enumerate(target_vocab)}
        for seq in seq_data:
            enc_input = [src_word2idx[n] for n in seq[0].split(" ")]
            dec_input = [dst_word2idx[i] for i in seq[1].split(" ")]
            dec_output = [dst_word2idx[i] for i in seq[2].split(" ")]
            enc_input_all.append(enc_input)
            dec_input_all.append(dec_input)
            dec_output_all.append(dec_output)  # not one-hot
        # make tensor
        return torch.LongTensor(enc_input_all), torch.LongTensor(dec_input_all), torch.LongTensor(dec_output_all)


    enc_inputs, dec_inputs, dec_outputs = make_data(sentences)
    train_data = my_dataset(enc_inputs, dec_inputs, dec_outputs)
    train_data_iter = DataLoader(train_data, batch_size, shuffle=False)

    class PositionalEncoding(nn.Module):
        def __init__(self, d_model, dropout=0.1, max_len=5000):
            super(PositionalEncoding, self).__init__()
            self.dropout = nn.Dropout(p=dropout)
            pe = torch.zeros(max_len, d_model)
            position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
            pe[:, 0::2] = torch.sin(position * div_term)
            pe[:, 1::2] = torch.cos(position * div_term)
            pe = pe.unsqueeze(0).transpose(0, 1)
            self.register_buffer('pe', pe)

        def forward(self, x):
            """
            x: [seq_len, batch_size, d_model]
            """
            x = x + self.pe[:x.size(0), :]
            return self.dropout(x)

    class Multi_Head_Attention(nn.Module):
        def __init__(self):
            super().__init__()
            self.W_Q = nn.Linear(embed_size, dim * num_heads, bias=False)  # project the input into the query space
            self.W_K = nn.Linear(embed_size, dim * num_heads, bias=False)  # project the input into the key space
            self.W_V = nn.Linear(embed_size, dim * num_heads, bias=False)  # project the input into the value space
            self.projection = torch.nn.Linear(num_heads * dim, embed_size)  # map the attention output back to the input dimension

        def forward(self, input_Q, input_K, input_V, atten_mask):
            """
            :param input_Q: -> [Batch_size, len_q, embedding_size]
            :param input_K: -> [Batch_size, len_k, embedding_size]
            :param input_V: -> [Batch_size, len_v(=len_k), embedding_size]
            :param atten_mask: -> [Batch_size, atten_len_k, atten_len_v]
            :return: dim here is the per-head dimension of the Q/K/V matrices
            # Compute Q, K, V from the inputs, build the attention matrix from Q and K,
            # then apply the mask to obtain the masked attention output.
            # The returned tensor has the same shape as the input.
            """
            torch.backends.cudnn.enabled = False
            residual = input_Q  # [Batch_size, len_q, embedding_size], residual term added to the attention output
            _, len_q, embedding_size = input_Q.size()
            Batch_size, atten_len_k, atten_len_v = atten_mask.size()
            # multiply the input by the weight matrices to obtain Q, K, V
            Q = self.W_Q(input_Q)  # Q -> [Batch_size, len_q, dim*num_heads]
            K = self.W_K(input_K)  # K -> [Batch_size, len_k, dim*num_heads]
            V = self.W_V(input_V)  # V -> [Batch_size, len_v, dim*num_heads]
            Q_Kt = torch.matmul(Q, K.transpose(-1, -2))  # Q_Kt -> [Batch_size, len_q, len_k]
            Q_Kt_div = Q_Kt / math.sqrt(dim)
            atten = torch.matmul(Q_Kt_div, V)  # atten -> [Batch_size, len_q, dim*num_heads]
            atten = atten.unsqueeze(1)  # atten -> [Batch_size, 1, len_q, dim*num_heads]
            atten = atten.view(Batch_size, -1, len_q, dim)  # atten -> [Batch_size, num_heads, len_q, dim]
            atten = nn.Softmax(dim=-1)(atten)  # [Batch_size, num_heads, len_q, dim]
            atten_mask = atten_mask.unsqueeze(1)  # atten_mask -> [Batch_size, 1, atten_len_k, atten_len_v]
            # atten_mask -> [Batch_size, num_heads, atten_len_k, atten_len_v], where atten_len_v == len_q
            atten_mask = atten_mask.repeat(1, num_heads, 1, 1)
            atten = torch.matmul(atten_mask.float(), atten.float())  # atten -> [Batch_size, num_heads, atten_len_k, dim]
            atten = atten.transpose(1, 2)  # atten -> [Batch_size, atten_len_k, num_heads, dim]
            atten = atten.reshape(Batch_size, atten_len_k, -1)  # atten -> [Batch_size, atten_len_k, num_heads * dim]
            atten = self.projection(atten)  # atten -> [Batch_size, atten_len_k, embed_size], atten_len_k == len_q
            # softmax does not change the shape; Add & Norm: add the residual and apply LayerNorm
            atten_ret = (residual + torch.softmax(atten, dim=1))
            atten_ret = nn.LayerNorm(embed_size).to(device)(atten_ret)
            return atten_ret

    class Feed_forward(nn.Module):
        """
        The position-wise Feed-Forward sublayer from the original paper.
        To check whether a tensor x is stored on the GPU, use: x.is_cuda
        """
        def __init__(self):
            super().__init__()
            self.W1 = nn.Linear(embed_size, d_ff).to(device)
            self.W2 = nn.Linear(d_ff, embed_size).to(device)
            self.b1 = torch.rand(d_ff).to(device)
            self.b2 = torch.rand(embed_size).to(device)
            self.relu = nn.ReLU().to(device)
            self.dropout = nn.Dropout(p=dropout)

        def forward(self, enc_inputs):
            """
            :param enc_inputs: enc_inputs -> [Batch_size, seq_len, embedding_size]
            :return: output of the same shape [Batch_size, seq_len, embedding_size]
            """
            fc1 = self.W1(enc_inputs) + self.b1
            fc1 = self.relu(fc1)
            fc2 = self.W2(fc1) + self.b2  # fc2 -> [Batch_size, seq_len, embedding_size]
            output = fc2  # output -> [Batch_size, seq_len, embedding_size]
            residual = enc_inputs
            Add_And_Norm = nn.LayerNorm(embed_size).to(device)(output + residual)  # Add & Norm
            return Add_And_Norm

    class Encoder_layer(nn.Module):
        def __init__(self):
            super().__init__()
            self.multi_head_attention = Multi_Head_Attention()
            self.feed_forward = Feed_forward()

        def forward(self, enc_inputs, enc_atten_mask):
            """
            :param enc_inputs: enc_inputs -> [Batch_size, seq_len, embedding_size]
            :param enc_atten_mask: enc_atten_mask -> [Batch_size, seq_len, seq_len]
            :return:
            """
            # for self-attention, Q, K and V are all taken from the same input
            atten_output = self.multi_head_attention(enc_inputs, enc_inputs, enc_inputs, enc_atten_mask)
            output = self.feed_forward(atten_output).to(device)  # output -> [Batch_size, seq_len, embed_size]
            return output, atten_output

    class Decoder_layer(nn.Module):
        def __init__(self):
            super().__init__()
            self.masked_multi_head_attention = Multi_Head_Attention()
            self.multi_head_attention = Multi_Head_Attention()
            self.feed_forward = Feed_forward()
            self.embed = torch.nn.Embedding(dst_vocab_size, embed_size)

        def forward(self, dec_input, enc_output, dec_atten_mask, dec_mask_atten_mask):
            """
            :param dec_input: [Batch_size, dst_len, embedding_size]
            :param enc_output: [Batch_size, src_len, embedding_size]
            :param dec_atten_mask: [Batch_size, dst_len, dst_len]
            :param dec_mask_atten_mask: [Batch_size, dst_len, dst_len]
            :return: [Batch_size, dst_len, embedding_size]
            To check a variable's type, use data.dtype
            """
            # first multi-head attention of the decoder layer: self-attention over the decoder input
            masked_atten_outputs = self.masked_multi_head_attention(dec_input, dec_input, dec_input, dec_atten_mask)
            # masked_atten_outputs -> [Batch_size, dst_len, embedding_size]
            # enc_output -> [Batch_size, src_len, embedding_size]
            # second multi-head attention: K and V come from the encoder output, Q comes from the previous decoder sublayer
            dec_atten_outputs = self.multi_head_attention(masked_atten_outputs, enc_output, enc_output,
                                                          dec_mask_atten_mask)
            output = self.feed_forward(dec_atten_outputs)  # output -> [Batch_size, dst_len, embed_size]
            return output, dec_atten_outputs

    class Encoder(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = nn.ModuleList(Encoder_layer() for _ in range(num_layers))
            self.embed = torch.nn.Embedding(src_vocab_size, embed_size)
            self.pos = PositionalEncoding(embed_size)

        def forward(self, enc_input_encoder):
            """
            :param enc_input_encoder: -> [Batch_size, seq_len]
            :return: -> [Batch_size, seq_len, embed_size]
            """
            enc_atten_mask = get_attn_pad_mask(enc_input_encoder, enc_input_encoder)  # [Batch_size, seq_len, seq_len]
            enc_input_embed = self.embed(enc_input_encoder)  # enc_input_embed -> [Batch_size, seq_len, embed_size]
            output = self.pos(enc_input_embed.transpose(0, 1)).transpose(0, 1)
            for layer in self.layers:
                output, atten = layer(output, enc_atten_mask)  # output -> [Batch_size, seq_len, embedding_size]
            return output

    class Decoder(nn.Module):
        def __init__(self):
            super().__init__()
            self.layers = nn.ModuleList(Decoder_layer() for _ in range(num_layers))
            self.embed = torch.nn.Embedding(dst_vocab_size, embed_size)  # the vocabulary size must be correct, otherwise indexing raises an out-of-range error
            self.pos = PositionalEncoding(embed_size)

        def forward(self, decoder_enc_input, decoder_dec_input, decoder_enc_output):
            """
            :param decoder_enc_input: [Batch_size, src_len]
            :param decoder_dec_input: [Batch_size, dst_len]
            :param decoder_enc_output: [Batch_size, src_len, embedding_size]
            :return:
            """
            dec_inputs_embed = self.embed(decoder_dec_input)  # [Batch_size, dst_len, embedding_size]
            # output = self.pos(dec_inputs_embed)
            output = self.pos(dec_inputs_embed.transpose(0, 1)).transpose(0, 1).to(device)
            # both masks are padding masks over the decoder input: [Batch_size, dst_len, dst_len]
            dec_mask_atten_mask = get_attn_pad_mask(decoder_dec_input, decoder_dec_input).to(device)
            dec_atten_mask = get_attn_pad_mask(decoder_dec_input, decoder_dec_input).to(device)
            for layer in self.layers:
                output, atten = layer(output, decoder_enc_output, dec_mask_atten_mask, dec_atten_mask)
                # output -> [Batch_size, dst_len, embedding_size]
            return output

    class Transformer(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = Encoder().to(device)
            self.decoder = Decoder().to(device)
            self.lr_rate = 1e-3
            self.optim = torch.optim.Adam(self.parameters(), lr=self.lr_rate)
            self.crition = nn.CrossEntropyLoss(ignore_index=0)
            self.projection = nn.Linear(embed_size, dst_vocab_size, bias=False).to(device)

        def forward(self, enc_input, dec_input, dec_output):
            """
            :param enc_input: -> [Batch_size, src_len]
            :param dec_input: -> [Batch_size, dst_len]
            :param dec_output: -> [Batch_size, dst_len]
            :return:
            """
            encode_outputs = self.encoder(enc_input)  # encode_outputs -> [Batch_size, src_len, embedding_size]
            # decode_output -> [Batch_size, dst_len, embedding_size]
            decode_output = self.decoder(enc_input, dec_input, encode_outputs)
            decode_output = self.projection(decode_output)  # decode_output -> [Batch_size, dst_len, dst_vocab_size]
            out_put = torch.argmax(decode_output, 2)  # index of the maximum along dimension 2, i.e. the predicted token ids
            dec_output = dec_output.view(-1)  # dec_output -> [Batch_size * dst_len]
            decode_output = decode_output.view(-1, decode_output.size(-1))  # [Batch_size * dst_len, dst_vocab_size]
            loss = self.crition(decode_output, dec_output)
            return out_put, loss

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = Transformer().to(device)
    lr_rate = 1e-3
    # optimizer = torch.optim.Adam(model.parameters(), lr=lr_rate)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr_rate, momentum=0.99)
    for i in range(1000):
        for enc_inputs_i, dec_inputs_i, dec_output_i in train_data_iter:
            enc_inputs_i, dec_inputs_i, dec_output_i = enc_inputs_i.to(device), dec_inputs_i.to(device), dec_output_i.to(
                device)
            predict, loss_i = model(enc_inputs_i, dec_inputs_i, dec_output_i)
            optimizer.zero_grad()
            loss_i.backward()
            optimizer.step()
        if i % 100 == 0:
            print("step {0} loss {1}".format(i, loss_i))
            # print("predict ", predict)

    The full code comes first.

    Following the Transformer paper, this is a Transformer built with PyTorch on an Encoder-Decoder architecture with multi-head attention, used for machine translation.

    For convenience, the corpus is a small hand-built one:

    # enc_input               dec_input            dec_output
    ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],
    ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']

    The dataset is built with the native Dataset and DataLoader classes; the corpus is stored as word-to-index key-value mappings, which turns the sentences into the index sequences below (a short usage sketch follows the listing):

    enc_input:  [[1, 2, 3, 4, 0], [1, 2, 3, 5, 0]]
    dec_input:  [[6, 1, 2, 3, 4, 8], [6, 1, 2, 3, 5, 8]]
    dec_output: [[1, 2, 3, 4, 8, 7], [1, 2, 3, 5, 8, 7]]
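
    The sketch below simply reuses make_data, my_dataset and sentences from the listing above to show how the three index tensors are wrapped and batched; the exact index values depend on Python's set ordering, so they may differ from the ones shown here.

    enc_inputs, dec_inputs, dec_outputs = make_data(sentences)    # LongTensors of word indices
    train_data = my_dataset(enc_inputs, dec_inputs, dec_outputs)  # Dataset indexed by sample position
    train_data_iter = DataLoader(train_data, batch_size=2, shuffle=False)

    for enc_batch, dec_in_batch, dec_out_batch in train_data_iter:
        # one batch of index tensors: [batch_size, src_len] and [batch_size, dst_len]
        print(enc_batch.shape, dec_in_batch.shape, dec_out_batch.shape)
        # torch.Size([2, 5]) torch.Size([2, 6]) torch.Size([2, 6])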

    These three are the encoder input, the decoder input, and the decoder target output. The decoder input is the ground-truth output sequence (prefixed with the start token 'S') so that the encoder-decoder model can be trained with teacher forcing. (Compared with training without teacher forcing, the accuracy difference is small on a dataset this tiny; I have not tested it on large-scale data.)
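
    As a minimal illustration of the teacher-forcing shift (make_decoder_pair is a hypothetical helper, not part of the code above), the decoder input and the decoder target are the same sentence shifted by one position:

    def make_decoder_pair(target_tokens):
        dec_input = ['S'] + target_tokens    # the decoder is fed the ground truth, shifted right
        dec_output = target_tokens + ['E']   # the decoder is trained to predict the next token
        return dec_input, dec_output

    print(make_decoder_pair('i want a beer .'.split()))
    # (['S', 'i', 'want', 'a', 'beer', '.'], ['i', 'want', 'a', 'beer', '.', 'E'])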

    Following the original Transformer paper, each word vector has 512 dimensions; torch.nn.Embedding is used to turn word indices into vectors:

    self.embed = torch.nn.Embedding(src_vocab_size, embed_size)
    src_vocab_size is the vocabulary size and embed_size is the embedding dimension. Since the Transformer, unlike recurrent networks such as RNN/LSTM/GRU, does not model the temporal order of the sequence through recurrence, a positional encoding is added:
    class PositionalEncoding(nn.Module):
        def __init__(self, d_model, dropout=0.1, max_len=5000):
            super(PositionalEncoding, self).__init__()
            self.dropout = nn.Dropout(p=dropout)
            pe = torch.zeros(max_len, d_model)
            position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
            div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
            pe[:, 0::2] = torch.sin(position * div_term)
            pe[:, 1::2] = torch.cos(position * div_term)
            pe = pe.unsqueeze(0).transpose(0, 1)
            self.register_buffer('pe', pe)

        def forward(self, x):
            """
            x: [seq_len, batch_size, d_model]
            """
            x = x + self.pe[:x.size(0), :]
            return self.dropout(x)

    In essence, each position is encoded with sine and cosine functions of different frequencies: PE(pos, 2i) = sin(pos / 10000^(2i/d_model)) and PE(pos, 2i+1) = cos(pos / 10000^(2i/d_model)). The positional encoding is added to the self.embed output and used as the network input, so the resulting vectors carry both positional and semantic information (see the small shape check after the snippet below):

    enc_input_embed = self.embed(enc_input_encoder)
    output = self.pos(enc_input_embed.transpose(0, 1)).transpose(0, 1)
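
    A minimal check of the encoding, assuming the PositionalEncoding class above: feeding an all-zero tensor returns the raw encoding, and position 0 gets sin(0) = 0 in every even dimension.

    import torch

    pe_layer = PositionalEncoding(d_model=512, dropout=0.0)
    x = torch.zeros(5, 2, 512)   # [seq_len, batch_size, d_model]
    out = pe_layer(x)            # pe[:5] is broadcast over the batch dimension
    print(out.shape)                                             # torch.Size([5, 2, 512])
    print(torch.allclose(out[0, :, 0::2], torch.zeros(2, 256)))  # True: even dims at position 0 are sin(0)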

    In the encoder, the Encoder is a stack of num_layers identical encoder layers connected in series. Why stack them? In my view it serves much the same purpose as increasing network depth in CV. nn.ModuleList is used to chain the num_layers encoder layers together (a self-contained sketch of this pattern follows the line below):

    self.layers = nn.ModuleList(Encoder_layer() for _ in range(num_layers))
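
    The sketch below is a self-contained toy example of the ModuleList pattern, with nn.Linear layers standing in for Encoder_layer: ModuleList registers each sub-layer's parameters with the parent module, and the forward pass feeds the output of one layer into the next.

    import torch
    import torch.nn as nn

    class TinyStack(nn.Module):
        def __init__(self, num_layers=6, width=512):
            super().__init__()
            self.layers = nn.ModuleList(nn.Linear(width, width) for _ in range(num_layers))

        def forward(self, x):
            for layer in self.layers:   # the output of one layer is the input of the next
                x = layer(x)
            return x

    print(TinyStack()(torch.rand(2, 5, 512)).shape)   # torch.Size([2, 5, 512])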

    The decoder is built much like the encoder, but each decoder layer has two multi-head attention sublayers.

    The first (masked) multi-head attention sublayer applies self-attention to the decoder input and produces an output of the same size as that input.

    For example, after embedding and positional encoding, the decoder input is dec_inputs -> [Batch_size, dst_len, embedding_size].

    The output of this sublayer has the same shape: dec_outputs -> [Batch_size, dst_len, embedding_size].

    In the second attention sublayer, input_Q is the output of the first sublayer, while input_K and input_V are the Encoder output. The purpose is to relate the decoder-side representation to the encoder output, i.e. let each target position attend to the relevant parts of the source sentence (see the sketch below).
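
    A minimal sketch of that cross-attention call, reusing Multi_Head_Attention and the device variable from the listing above; the random tensors and the all-False mask are stand-ins for the real sublayer outputs.

    import torch

    cross_attention = Multi_Head_Attention().to(device)
    masked_atten_outputs = torch.rand(2, 6, 512).to(device)  # output of the first (masked) sublayer
    enc_output = torch.rand(2, 6, 512).to(device)            # stand-in for the encoder output
    mask = torch.zeros(2, 6, 6).bool().to(device)            # toy mask with no padded positions

    # Q comes from the decoder side, K and V come from the encoder output
    out = cross_attention(masked_atten_outputs, enc_output, enc_output, mask)
    print(out.shape)   # torch.Size([2, 6, 512])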

  • Original article: https://blog.csdn.net/linxizi0622/article/details/125542432