Building a Machine Translation Model with a Transformer

Data Loading and Preprocessing

Following the earlier Seq2Seq model with attention, we reuse its data loading and preprocessing code:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from collections import Counter
import re
from tqdm import tqdm
from torch.nn.utils.rnn import pad_sequence
from torch.optim.lr_scheduler import _LRScheduler
import time
import math

PAD_IDX = 0
SOS_IDX = 1
EOS_IDX = 2
UNK_IDX = 3


class TextDataset(Dataset):
    def __init__(self, en_list, cn_list):
        self.cn_data = cn_list
        self.en_data = en_list

    def __len__(self):
        return len(self.en_data)

    def __getitem__(self, index):
        return (
            torch.tensor(self.en_data[index], dtype=torch.long),
            torch.tensor(self.cn_data[index], dtype=torch.long),
        )

def collate_fn(batch):
    en_sequence, cn_sequence = zip(*batch)
    padded_en_sequence = pad_sequence(en_sequence, batch_first=True, padding_value=PAD_IDX)
    padded_cn_sequence = pad_sequence(cn_sequence, batch_first=True, padding_value=PAD_IDX)

    return padded_en_sequence, padded_cn_sequence

def en_tokenize(text):
    text = text.lower()
    text_list = re.split(r'(\W)', text)  # raw string avoids the invalid-escape warning
    return [token.strip() for token in text_list if token.strip()]

def cn_tokenize(text):
    return list(text)

def build_vocab(tokenized_text, vocab_limit=800000):
    if isinstance(tokenized_text[0], list):
        tokenized_data = [text for text_list in tokenized_text for text in text_list]
    else:
        tokenized_data = tokenized_text
    word_count = Counter(tokenized_data)
    vocab = sorted(word_count, key=word_count.get, reverse=True)[:vocab_limit]
    vocab = ['<PAD>', '<SOS>', '<EOS>', '<UNK>'] + vocab
    word_to_idx = {word: i for i, word in enumerate(vocab)}
    return word_to_idx, vocab

with open('./中英翻译数据集/train.zh', 'r', encoding='utf-8') as f:
    cn_data_1 = f.readlines()[:1000]
    cn_data = [text.strip() for text in cn_data_1]

with open('./中英翻译数据集/train.en', 'r', encoding='utf-8') as f:
    en_data_1 = f.readlines()[:1000]
    en_data = [text.strip() for text in en_data_1]

cn_data_tokenized = [cn_tokenize(text) for text in cn_data]
en_data_tokenized = [en_tokenize(text) for text in en_data]

cn_word_to_idx, cn_vocab = build_vocab(cn_data_tokenized)
cn_idx_to_word = {i: word for i, word in enumerate(cn_vocab)}
cn_vocab_size = len(cn_vocab)

en_word_to_idx, en_vocab = build_vocab(en_data_tokenized)
en_idx_to_word = {i: word for i, word in enumerate(en_vocab)}
en_vocab_size = len(en_vocab)


# English-to-Chinese: the Chinese side needs `<SOS>` at the start and `<EOS>` at the end
cn_data_tokenized = [['<SOS>'] + text_list + ['<EOS>'] for text_list in cn_data_tokenized]

cn_data_idx = [[cn_word_to_idx[word] if word in cn_word_to_idx else UNK_IDX for word in text_list] for text_list in cn_data_tokenized]
en_data_idx = [[en_word_to_idx[word] if word in en_word_to_idx else UNK_IDX for word in text_list] for text_list in en_data_tokenized]


dataset = TextDataset(en_data_idx, cn_data_idx)
# dataloader = DataLoader(dataset, batch_size=64, shuffle=True, collate_fn=collate_fn)
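
A quick check of the two tokenizers (not in the original post; the expected outputs are shown as comments):

print(en_tokenize("What is your name?"))   # ['what', 'is', 'your', 'name', '?']
print(cn_tokenize("你叫什么名字?"))          # ['你', '叫', '什', '么', '名', '字', '?']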


Building the Transformer Model

The masked multi-head self-attention from the earlier implementation can be optimized here. There is no need for every masked self-attention layer to build its own lower-triangular matrix and combine it with the original target mask, which repeats the same work. Instead, the padding mask is combined with the lower-triangular (causal) mask once, before trg_mask is passed to the model, so the trg_mask the model receives already contains both the padding mask and the mask that hides future positions. The model itself therefore no longer generates the look-ahead mask.

class PositionalEncoding(nn.Module):
    def __init__(self, embed_dim, max_len=5000):
        super().__init__()
        # Positional encoding matrix of shape [max_len, embed_dim]
        pe = torch.zeros(max_len, embed_dim)

        # Column vector of positions
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # [max_len, 1]
        # Denominator terms used by the sine and cosine functions
        div_term = torch.exp(torch.arange(0, embed_dim, 2, dtype=torch.float) * (-torch.log(torch.tensor(10000.0)) / embed_dim))
        # Sine at even indices
        pe[:, 0::2] = torch.sin(position * div_term)
        # Cosine at odd indices
        pe[:, 1::2] = torch.cos(position * div_term)
        # Add a batch dimension, giving [1, max_len, embed_dim], so it broadcasts over the batch
        pe = pe.unsqueeze(0)
        # Register as a buffer: not a parameter, but saved and loaded together with the model
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Add the positional encoding to the input embeddings
        x = x + self.pe[:, :x.size(1)]  # x's seq_len may be shorter than max_len, so slice the encoding to x's length
        return x

class MultiheadSelfAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super().__init__()
        assert embed_dim % num_heads == 0, "Embedding dimension must be divisible by number of heads"

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        self.qkv_proj = nn.Linear(embed_dim, 3*embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # x: [batch_size, seq_len, embed_dim]
        # mask: [batch_size, 1, 1, seq_len]
        batch_size, seq_len, _ = x.shape

        # Compute Q, K, V
        qkv = self.qkv_proj(x)  # [batch_size, seq_len, 3*embed_dim]
        Q, K, V = qkv.chunk(3, dim=-1)  # each [batch_size, seq_len, embed_dim]

        # Split into heads
        Q = Q.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        K = K.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
        V = V.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention scores
        attn_score = torch.matmul(Q, K.transpose(-2, -1)) / (self.head_dim ** 0.5)

        if mask is not None:
            attn_score = attn_score.masked_fill(mask == 0, float('-inf'))

        attn_weights = torch.softmax(attn_score, dim=-1)
        attn_weights = self.dropout(attn_weights)

        context = torch.matmul(attn_weights, V)
        context = context.transpose(1, 2).contiguous().view(batch_size, seq_len, self.embed_dim)

        output = self.out_proj(context)
        return output


class PositionalwiseFFN(nn.Module):
    def __init__(self, embed_dim, hidden_dim, dropout=0.1):
        super().__init__()
        self.linear1 = nn.Linear(embed_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, embed_dim)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.relu(self.linear1(x))
        x = self.dropout(x)
        x = self.linear2(x)
        return x

class EncoderLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, hidden_dim, dropout=0.1):
        super().__init__()
        self.self_attn = MultiheadSelfAttention(embed_dim, num_heads, dropout)
        self.ffn = PositionalwiseFFN(embed_dim, hidden_dim, dropout)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Multi-head self-attention
        attn_output = self.self_attn(x, mask)
        # Residual connection + layer norm
        x = self.norm1(x + self.dropout(attn_output))
        # Position-wise FFN
        ffn_output = self.ffn(x)
        # Residual connection + layer norm
        x = self.norm2(x + self.dropout(ffn_output))
        return x


class Encoder(nn.Module):
    def __init__(self, vocab_size, num_layers, embed_dim, num_heads, hidden_dim, max_len=5000, dropout=0.1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.positional_encoding = PositionalEncoding(embed_dim, max_len)
        self.layers = nn.ModuleList([
            EncoderLayer(embed_dim, num_heads, hidden_dim, dropout)
            for _ in range(num_layers)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        x = self.embedding(x)
        x = self.positional_encoding(x)
        x = self.dropout(x)
        for layer in self.layers:
            x = layer(x, mask)
        return x


class EncoderDecoderAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super().__init__()
        assert embed_dim % num_heads == 0, "Embedding dimension must be divisible by number of heads"

        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.kv_proj = nn.Linear(embed_dim, 2*embed_dim)
        self.out_proj = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, enc_output, dec_output, src_mask=None):
        batch_size, enc_len, _ = enc_output.shape
        _, dec_len, _ = dec_output.shape

        Q = self.q_proj(dec_output).view(batch_size, dec_len, self.num_heads, self.head_dim).transpose(1, 2)
        kv = self.kv_proj(enc_output)
        K, V = kv.chunk(2, dim=-1)
        K = K.view(batch_size, enc_len, self.num_heads, self.head_dim).transpose(1, 2)
        V = V.view(batch_size, enc_len, self.num_heads, self.head_dim).transpose(1, 2)

        attn_score = torch.matmul(Q, K.transpose(-2, -1)) / (self.head_dim ** 0.5)

        if src_mask is not None:
            attn_score = attn_score.masked_fill(src_mask == 0, float('-inf'))

        attn_weights = torch.softmax(attn_score, dim=-1)
        attn_weights = self.dropout(attn_weights)

        context = torch.matmul(attn_weights, V)
        context = context.transpose(1, 2).contiguous().view(batch_size, dec_len, self.embed_dim)

        output = self.out_proj(context)
        return output


class DecoderLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, hidden_dim, dropout=0.1):
        super().__init__()
        self.self_attn = MultiheadSelfAttention(embed_dim, num_heads, dropout)
        self.cross_attn = EncoderDecoderAttention(embed_dim, num_heads, dropout)
        self.ffn = PositionalwiseFFN(embed_dim, hidden_dim, dropout)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.norm3 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, src_mask=None, trg_mask=None):
        # Masked multi-head self-attention
        attn_output1 = self.self_attn(x, trg_mask)
        x = self.norm1(x + self.dropout(attn_output1))

        # Encoder-decoder (cross) attention
        attn_output2 = self.cross_attn(enc_output, x, src_mask)
        x = self.norm2(x + self.dropout(attn_output2))

        # Position-wise FFN
        ffn_output = self.ffn(x)
        x = self.norm3(x + self.dropout(ffn_output))
        return x


class Decoder(nn.Module):
    def __init__(self, vocab_size, num_layers, embed_dim, num_heads, hidden_dim, max_len=5000, dropout=0.1):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.positional_encoding = PositionalEncoding(embed_dim, max_len)
        self.layers = nn.ModuleList([
            DecoderLayer(embed_dim, num_heads, hidden_dim, dropout)
            for _ in range(num_layers)
        ])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, enc_output, src_mask=None, trg_mask=None):
        x = self.embedding(x)
        x = self.positional_encoding(x)
        x = self.dropout(x)

        for layer in self.layers:
            x = layer(x, enc_output, src_mask, trg_mask)
        return x


class Transformer(nn.Module):
    def __init__(self, src_vocab_size, trg_vocab_size, num_layers, embed_dim, num_heads, hidden_dim, device='cpu', max_len=5000, dropout=0.1):
        super().__init__()
        self.device = device
        self.encoder = Encoder(src_vocab_size, num_layers, embed_dim, num_heads, hidden_dim, max_len, dropout)
        self.decoder = Decoder(trg_vocab_size, num_layers, embed_dim, num_heads, hidden_dim, max_len, dropout)
        self.fc = nn.Linear(embed_dim, trg_vocab_size)

    def forward(self, src, trg, src_mask=None, trg_mask=None):
        enc_output = self.encoder(src, src_mask)
        dec_output = self.decoder(trg, enc_output, src_mask, trg_mask)
        output = self.fc(dec_output)
        return output

Build the model:

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model = Transformer(
    en_vocab_size,
    cn_vocab_size,
    num_layers=6,
    embed_dim=512,
    num_heads=8,
    hidden_dim=2048,
    device=device,
    dropout=0.1
).to(device)
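
As a quick sanity check (a sketch with random indices, not part of the original post), a forward pass without masks should return logits of shape [batch_size, trg_len, cn_vocab_size]:

with torch.no_grad():
    dummy_src = torch.randint(0, en_vocab_size, (2, 7), device=device)  # fake English batch: 2 sentences, 7 tokens
    dummy_trg = torch.randint(0, cn_vocab_size, (2, 5), device=device)  # fake Chinese batch: 2 sentences, 5 tokens
    print(model(dummy_src, dummy_trg).shape)  # torch.Size([2, 5, cn_vocab_size])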

Parameter Initialization

Use Xavier initialization:

def init_weights(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)

model.apply(init_weights)

Optimizer and Scheduler

Optimizer

In the original Transformer paper, "Attention Is All You Need", the Adam optimizer is used with the following settings:

Parameter | Value | Purpose
--- | --- | ---
β₁ (beta1) | 0.9 | Exponential decay rate of the first-moment (momentum) estimate
β₂ (beta2) | 0.98 | Exponential decay rate of the second-moment (adaptive learning-rate) estimate
ε (epsilon) | 1e-9 | Small constant that keeps the denominator non-zero and stabilizes the computation
Learning rate (lr) | set by the Noam scheduler | The initial value is 0.0; the actual learning rate is computed dynamically by the Noam scheduler (peaking at roughly 1e-3)
optimizer = optim.Adam(model.parameters(), lr=0.0, betas=(0.9, 0.98), eps=1e-9)

Scheduler

The Transformer uses the following schedule (the Noam scheduler) to adjust the learning rate dynamically: \[ \eta = f\cdot d^{-0.5}\cdot\min(t^{-0.5},t\cdot s^{-1.5}) \] where:

  • \(\eta\) is the learning rate
  • \(f\) (factor) is a scaling factor that controls the maximum learning rate and effectively plays the role of an "initial learning rate"; it is commonly set to 2
  • \(d\) (d_model) is the model's hidden dimension; in this machine translation model it is the embedding dimension embed_dim
  • \(t\) is the current training step, counted per iteration, i.e. one step per mini-batch rather than per epoch (Transformers are usually trained on very large datasets, and per-epoch updates would make the learning rate change far too slowly)
  • \(s\) (warmup_steps) is the number of warmup steps, commonly set to 4000

When \(t^{-0.5} > t\cdot s^{-1.5}\): \[ t^{-0.5}>t\cdot s^{-1.5}\Rightarrow t^{-1.5} > s^{-1.5} \] that is, \[ \frac{1}{t^{1.5}} > \frac{1}{s^{1.5}}\Rightarrow t^{1.5} < s^{1.5}\Rightarrow t<s \] so \[ \eta = \left\{ \begin{aligned} &f\cdot d^{-0.5}\cdot t\cdot s^{-1.5}\quad &t\leq s\\ \\ &f\cdot d^{-0.5}\cdot t^{-0.5}\quad &t>s \end{aligned} \right. \] During the warmup phase the learning rate therefore grows linearly with the training step; after warmup it decreases with the training step, and because \(\eta\propto\frac{1}{\sqrt{t}}\) the decay is fast at first and then slows down.

The learning rate reaches its maximum at the end of warmup, when \(t=s\): \[ \eta_{\max} = f\cdot d^{-0.5}\cdot s^{-0.5} \] With factor=2, embed_dim=512 and warmup_steps=4000, the maximum learning rate is \[ \eta_{\max} = 2\times\frac{1}{\sqrt{512}}\times\frac{1}{\sqrt{4000}} \approx 0.0013975 \]

class NoamLR(_LRScheduler):
    def __init__(self, optimizer, factor, d_model, warmup_steps, last_epoch=-1):
        self.factor = factor
        self.d_model = d_model
        self.warmup_steps = warmup_steps
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        step = self.last_epoch + 1
        fd = self.factor * (self.d_model ** -0.5)
        return [fd * min(step ** -0.5, step * (self.warmup_steps ** -1.5)) for _ in self.base_lrs]

scheduler = NoamLR(
    optimizer,
    factor=2.0,
    d_model=512,
    warmup_steps=4000
)
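
As a quick check of the schedule (a throwaway sketch with a dummy parameter and optimizer, not part of the original training code), the learning rate should rise during warmup, peak near the 0.0014 computed above around step 4000, and then decay:

_p = nn.Parameter(torch.zeros(1))
_opt = optim.Adam([_p], lr=0.0, betas=(0.9, 0.98), eps=1e-9)
_sched = NoamLR(_opt, factor=2.0, d_model=512, warmup_steps=4000)
lrs = []
for _ in range(20000):
    _opt.step()    # normally preceded by a backward pass
    _sched.step()
    lrs.append(_opt.param_groups[0]['lr'])
print(lrs[999], lrs[3999], lrs[19999])  # ~3.5e-4 (warming up), ~1.4e-3 (peak), ~6.3e-4 (decaying)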

Training

Generating the masks

For the encoder input src, only the padding positions need to be masked; for the decoder input trg, both the padding and the future positions must be masked:

def create_mask(src, trg):
    src_mask = (src != PAD_IDX).unsqueeze(1).unsqueeze(2)  # [batch_size, 1, 1, src_seq_len]

    if trg is not None:
        trg_pad_mask = (trg != PAD_IDX).unsqueeze(1).unsqueeze(2)  # [batch_size, 1, 1, trg_seq_len]

        # Causal mask (prevents attending to future positions)
        trg_seq_len = trg.size(1)
        causal_mask = torch.tril(torch.ones(trg_seq_len, trg_seq_len, dtype=torch.long)).unsqueeze(0).unsqueeze(1).to(trg_pad_mask.device)  # [1, 1, trg_seq_len, trg_seq_len]
        trg_mask = causal_mask & trg_pad_mask  # [batch_size, 1, trg_seq_len, trg_seq_len]

        return src_mask, trg_mask
    return src_mask
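
A toy example (not in the original post; PAD_IDX is 0 as defined earlier) shows what the combined target mask looks like: each row attends only to itself and earlier positions, and the padded position is masked out in every row:

toy_src = torch.tensor([[5, 6, 7, 0]])   # last source position is padding
toy_trg = torch.tensor([[1, 8, 9, 0]])   # <SOS>, two tokens, then padding
m_src, m_trg = create_mask(toy_src, toy_trg)
print(m_src.shape, m_trg.shape)          # torch.Size([1, 1, 1, 4]) torch.Size([1, 1, 4, 4])
print(m_trg[0, 0])                       # lower-triangular matrix with its last column zeroed out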

Training function

def train_epoch(model, dataloader, optimizer, scheduler, loss_function, clip, device):
    model.train()
    epoch_loss = 0
    total_tokens = 0

    with tqdm(dataloader, unit="batch") as batch_iter:
        for src, trg in batch_iter:
            src, trg = src.to(device), trg.to(device)

            # Prepare decoder input and target
            trg_input = trg[:, :-1]   # decoder input drops the final <EOS>
            trg_output = trg[:, 1:]   # decoder target drops the leading <SOS>

            # Build the masks
            src_mask, trg_mask = create_mask(src, trg_input)

            optimizer.zero_grad()

            # Forward pass
            output = model(src, trg_input, src_mask, trg_mask)

            # Compute the loss
            loss = loss_function(
                output.contiguous().view(-1, output.size(-1)),
                trg_output.contiguous().view(-1)
            )

            # Backward pass and optimization
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)  # gradient clipping
            optimizer.step()
            scheduler.step()

            # Statistics
            non_pad_tokens = (trg_output != PAD_IDX).sum().item()
            epoch_loss += loss.item() * non_pad_tokens
            total_tokens += non_pad_tokens

            # Update the progress bar
            batch_iter.set_postfix({
                "loss": loss.item(),
                "lr": optimizer.param_groups[0]['lr']
            })
    return epoch_loss / total_tokens

Validation function

def evaluate(model, dataloader, loss_function, device):
    model.eval()
    epoch_loss = 0
    total_tokens = 0

    with torch.no_grad():
        with tqdm(dataloader, unit="batch") as batch_iter:
            for src, trg in batch_iter:
                src, trg = src.to(device), trg.to(device)

                trg_input = trg[:, :-1]
                trg_output = trg[:, 1:]

                src_mask, trg_mask = create_mask(src, trg_input)

                output = model(src, trg_input, src_mask, trg_mask)

                loss = loss_function(
                    output.contiguous().view(-1, output.size(-1)),
                    trg_output.contiguous().view(-1)
                )

                non_pad_tokens = (trg_output != PAD_IDX).sum().item()
                epoch_loss += loss.item() * non_pad_tokens
                total_tokens += non_pad_tokens
                batch_iter.set_postfix({"val_loss": loss.item()})
    return epoch_loss / total_tokens

Loss function

We use cross-entropy loss with label smoothing: to keep the model from becoming over-confident, some probability mass is taken from the correct class and spread over the other classes. The loss on padding positions is ignored.

loss_function = nn.CrossEntropyLoss(ignore_index=PAD_IDX, label_smoothing=0.1)
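
For reference, PyTorch's label_smoothing=\(\varepsilon\) mixes the one-hot target with a uniform distribution over the \(V\) output classes, so the target probability of the correct class becomes \[ 1-\varepsilon+\frac{\varepsilon}{V} \] while every other class receives \(\frac{\varepsilon}{V}\); with \(\varepsilon=0.1\) and a large vocabulary the correct class is trained toward roughly 0.9 instead of 1.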

Splitting into training and validation sets

Use torch.utils.data.random_split() to randomly split the dataset into a training set and a validation set:

batch_size = 64

train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size

train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])

train_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    collate_fn=collate_fn,
    pin_memory=True,
    num_workers=2
)

val_loader = DataLoader(
    val_dataset,
    batch_size=batch_size,
    collate_fn=collate_fn,
    pin_memory=True,
    num_workers=2
)

Training loop

num_epochs = 30
clip = 3.0
best_val_loss = float('inf')
for epoch in range(num_epochs):
    start_time = time.time()

    train_loss = train_epoch(
        model, train_loader, optimizer, scheduler,
        loss_function, clip, device
    )

    val_loss = evaluate(model, val_loader, loss_function, device)

    end_time = time.time()

    # Save the best model
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        torch.save(model.state_dict(), 'best_model.pth')

    print(f"Epoch: {epoch + 1} | Time: {end_time - start_time:.2f}s")
    print(f"\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):.3f}")  # PPL = perplexity
    print(f"\t Val. Loss: {val_loss:.3f} | Val. PPL: {math.exp(val_loss):.3f}")

Inference

Prediction function

def translate(model, src_sentence, en_word_to_idx, cn_idx_to_word, device, max_length=50):
    model.eval()
    # Tokenize the source sentence and convert it to indices
    tokenized_src = en_tokenize(src_sentence)
    src_indices = [en_word_to_idx.get(word, UNK_IDX) for word in tokenized_src]

    # Convert to a tensor and add a batch dimension
    src = torch.tensor(src_indices, dtype=torch.long).unsqueeze(0).to(device)

    # Source mask
    src_mask = (src != PAD_IDX).unsqueeze(1).unsqueeze(2).to(device)

    # Encoder forward pass
    with torch.no_grad():
        enc_output = model.encoder(src, src_mask)

    # Initialize the target sequence with <SOS>
    trg_indices = [SOS_IDX]

    for _ in range(max_length):
        trg = torch.tensor(trg_indices, dtype=torch.long).unsqueeze(0).to(device)

        # Target (causal) mask
        trg_len = trg.size(1)
        trg_mask = torch.tril(torch.ones(1, 1, trg_len, trg_len)).to(device)

        # Decoder forward pass
        with torch.no_grad():
            output = model.decoder(trg, enc_output, src_mask, trg_mask)
            logits = model.fc(output[:, -1, :])  # take the last time step

        # Greedily pick the most probable token
        next_idx = logits.argmax(-1).item()
        trg_indices.append(next_idx)

        # Stop once <EOS> is generated
        if next_idx == EOS_IDX:
            break

    # Convert indices back to tokens, dropping <SOS> and a trailing <EOS> if present
    trg_indices = trg_indices[1:]
    if trg_indices and trg_indices[-1] == EOS_IDX:
        trg_indices = trg_indices[:-1]
    trg_tokens = [cn_idx_to_word[idx] for idx in trg_indices]
    return ''.join(trg_tokens)  # the Chinese side was tokenized per character, so join without spaces

Loading the model

model_m = Transformer(
    en_vocab_size,
    cn_vocab_size,
    num_layers=6,
    embed_dim=512,
    num_heads=8,
    hidden_dim=2048,
    device=device,
    dropout=0.1
)

state_dict = torch.load('best_model.pth', map_location=device)

model_m.load_state_dict(state_dict)
model_m = model_m.to(device)

Note: if the model was trained on multiple GPUs and its parameters were saved that way, the following code is needed instead:

model_m = Transformer(
    en_vocab_size,
    cn_vocab_size,
    num_layers=4,
    embed_dim=512,
    num_heads=8,
    hidden_dim=2048,
    device=device,
    dropout=0.1
)
# Fix the parameter names when loading
state_dict = torch.load('best_model.pth', map_location=device)
new_state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}  # strip the "module." prefix
model_m.load_state_dict(new_state_dict)
model_m = model_m.to(device)

Reason:

When training on multiple GPUs in PyTorch (e.g. with DataParallel or DistributedDataParallel), the model is wrapped in a new module, which automatically prepends module. to every parameter name. These prefixes are kept in the state_dict when the model is saved. If that state_dict is then loaded directly into a single-GPU model, the parameter names no longer match and loading fails, so the module. prefix must be stripped manually to match the single-GPU model's parameter names.
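
A simpler alternative, if you control the training script, is to save the unwrapped model in the first place so that no prefix is ever written (here model refers to the DataParallel/DistributedDataParallel wrapper):

torch.save(model.module.state_dict(), 'best_model.pth')  # .module is the original, unwrapped model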

Example usage

test_sentences = [
    "i love you.",
    "what is your name?",
    "this is a test sentence.",
    "the weather is nice today."
]

for sent in test_sentences:
    translation = translate(model_m, sent, en_word_to_idx, cn_idx_to_word, device)
    print(f"English: {sent}")
    print(f"Chinese: {translation}\n")

Training on two A100s with batch_size=256, mixed-precision training, and the first 2 million Chinese-English sentence pairs for 20 epochs, the test-set PPL reached 12.474. Judging from the best model's predictions, syntactically complex sentences are still translated poorly, and the output is mostly literal, word-for-word translation.

Problems Encountered and Solutions

  1. With a small batch size (64) the loss struggles to drop below 4, while a large batch (256) on two A100s brings it below 3.

    On a single 4090 with mixed-precision training and batch_size = 64 + 32, the loss plateaus at around 4.6 and will not go much lower.

    On two A100s with mixed-precision training and batch_size = 256, the loss drops below 3.

  2. Why, during multi-GPU training, does one card use about 10 GB more memory than the other, when the whole model is under 1 GB and a single batch of data is also under 1 GB?

    Solution: this is a DataParallel issue. DataParallel has a master GPU that additionally handles gradient aggregation and parameter updates, so its memory usage is noticeably higher than the other cards'. The fix is to use DistributedDataParallel: each GPU keeps its own replica of the model and gradients are synchronized across cards via all-reduce, so memory usage is much more balanced and there is no longer one card running out of memory while the others sit half-empty, forcing a smaller batch size and lower throughput.

  3. Memory usage is very high with large batches; the stopgap is mixed-precision training (see the sketch at the end of this section). It is still unclear why memory usage keeps climbing during training and only stabilizes once the whole dataset has been visited once; gradients are not being accumulated, and the computation graph and the Q/K/V intermediates should be freed after every batch.

  4. A fixed learning rate could not bring the loss down. The fix is learning-rate warmup plus decay, using the Noam scheduler from the original paper, which worked better than a hand-rolled warmup + cosine-annealing scheduler.

  5. Because sequence lengths vary a lot, GPU utilization fluctuates heavily during training and the GPUs are often under-used. The planned fix is the original paper's approach of batching by number of tokens rather than by number of sequences: stop adding sequences to a batch once it reaches a token budget. This has not been tried yet; a rough sketch of such a token-based batching helper is also given below.
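
Regarding problem 3: the post does not show the actual multi-GPU mixed-precision script, so the following is only a minimal sketch of what one mixed-precision training step could look like with torch.cuda.amp, reusing the model, create_mask, loss_function, clip, optimizer and scheduler defined above:

scaler = torch.cuda.amp.GradScaler()

for src, trg in train_loader:
    src, trg = src.to(device), trg.to(device)
    trg_input, trg_output = trg[:, :-1], trg[:, 1:]
    src_mask, trg_mask = create_mask(src, trg_input)

    optimizer.zero_grad()
    with torch.cuda.amp.autocast():          # run the forward pass in reduced precision
        output = model(src, trg_input, src_mask, trg_mask)
        loss = loss_function(output.reshape(-1, output.size(-1)), trg_output.reshape(-1))

    scaler.scale(loss).backward()            # scale the loss to avoid float16 underflow
    scaler.unscale_(optimizer)               # unscale before gradient clipping
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    scaler.step(optimizer)                   # skips the update if gradients overflowed
    scaler.update()
    scheduler.step()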

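Regarding problem 5: purely as an illustration of batching by token count (the helper name make_token_batches and the max_tokens budget are made up, and this has not been validated on the experiments above), one could sort by target length and cut a batch once its padded size exceeds the budget:

def make_token_batches(pairs, max_tokens=4096):
    # pairs: list of (en_indices, cn_indices), e.g. list(zip(en_data_idx, cn_data_idx))
    order = sorted(range(len(pairs)), key=lambda i: len(pairs[i][1]))  # sort by target length
    batches, batch, longest = [], [], 0
    for i in order:
        longest = max(longest, len(pairs[i][1]))
        # padded size of the batch if this sequence were added
        if batch and longest * (len(batch) + 1) > max_tokens:
            batches.append(batch)
            batch, longest = [], len(pairs[i][1])
        batch.append(i)
    if batch:
        batches.append(batch)
    return batches  # lists of dataset indices

The returned index lists can be passed to DataLoader through its batch_sampler argument, together with the same collate_fn used above.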
