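"""Toy training script: a small encoder-decoder Transformer on random token
pairs, with parameter updates applied through GaLore (Gradient Low-Rank
Projection). Hyperparameters are illustrative; the GaLore wrapper is assumed
to follow this repository's `galore` package interface.
"""
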
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from galore import GaLore


# Simple Transformer model: shared source/target embedding, a standard
# encoder-decoder Transformer, and a linear head over the vocabulary.
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_heads, num_layers):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.transformer = nn.Transformer(
            d_model=embed_dim,
            nhead=num_heads,
            num_encoder_layers=num_layers,
            num_decoder_layers=num_layers,
            batch_first=True,  # inputs/outputs are (batch, seq, embed)
        )
        self.fc = nn.Linear(embed_dim, vocab_size)

    def forward(self, src, tgt):
        src_embed = self.embedding(src)
        tgt_embed = self.embedding(tgt)
        # Causal mask so the decoder cannot attend to future target positions
        tgt_mask = self.transformer.generate_square_subsequent_mask(
            tgt.size(1)
        ).to(tgt.device)
        out = self.transformer(src_embed, tgt_embed, tgt_mask=tgt_mask)
        out = self.fc(out)
        return out
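
# Illustrative shape check (hypothetical snippet): with batch_first=True the
# model maps (batch, seq) token ids to (batch, seq, vocab_size) logits, e.g.
#   m = TransformerModel(vocab_size=100, embed_dim=64, num_heads=4, num_layers=2)
#   m(torch.randint(0, 100, (2, 10)), torch.randint(0, 100, (2, 10))).shape
#   # -> torch.Size([2, 10, 100])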


def main():
    vocab_size = 100
    embed_dim = 64
    num_heads = 4
    num_layers = 2
    batch_size = 32
    num_epochs = 10
    learning_rate = 0.001

    # Generate toy data
    seq_length = 20
    num_samples = 1000
    src_data = torch.randint(0, vocab_size, (num_samples, seq_length))
    tgt_data = torch.randint(0, vocab_size, (num_samples, seq_length))
    dataset = TensorDataset(src_data, tgt_data)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = TransformerModel(vocab_size, embed_dim, num_heads, num_layers)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
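
    # GaLore wraps the model and applies low-rank projection to its gradients:
    # `rank` is the projection rank and `update_freq` how often the projection
    # is refreshed (assumed semantics, per this repo's `galore` package).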
    galore = GaLore(model, rank=4, update_freq=200)

    # Training loop
    for epoch in range(num_epochs):
        for batch_idx, (src, tgt) in enumerate(dataloader):
            src, tgt = src.to(device), tgt.to(device)
            optimizer.zero_grad()

            # Shift the target by one position for next-token prediction
            # (src is truncated so source/target lengths stay equal)
            src_input = src[:, :-1]
            tgt_input = tgt[:, :-1]
            tgt_output = tgt[:, 1:]

            output = model(src_input, tgt_input)
            loss = nn.functional.cross_entropy(
                output.reshape(-1, vocab_size), tgt_output.reshape(-1)
            )
            loss.backward()

            # Update the model parameters via GaLore's update hook
            # (lor_grad is presumably the projected low-rank gradient)
            def update_func(lor_grad):
                optimizer.step()
                return lor_grad

            galore.step(update_func)

            if (batch_idx + 1) % 3 == 0:
                print(
                    f"Epoch [{epoch+1}/{num_epochs}], "
                    f"Batch [{batch_idx+1}/{len(dataloader)}], "
                    f"Loss: {loss.item():.4f}"
                )


if __name__ == "__main__":
    main()