broskicodes committed
Commit 8ac9b44 · Parent(s): e19b1b8

Upload model

Files changed (2):
  1. config.json +2 -2
  2. nano_gpt_model.py +134 -0
config.json CHANGED
@@ -3,8 +3,8 @@
     "SimpleStories4MModel"
   ],
   "auto_map": {
-    "AutoConfig": "config_4m.SimpleStories4MConfig",
-    "AutoModelForCausalLM": "model_4m.SimpleStories4MModel"
+    "AutoConfig": "configuration_ss4m.SimpleStories4MConfig",
+    "AutoModelForCausalLM": "modeling_ss4m.SimpleStories4MModel"
   },
   "block_size": 1080,
   "dropout": 0.1,
nano_gpt_model.py ADDED
@@ -0,0 +1,134 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+# model architecture
+class AttentionHead(nn.Module):
+    """a single head of self attention"""
+
+    def __init__(self, n_embed, head_size, block_size, dropout):
+        super().__init__()
+        self.key = nn.Linear(n_embed, head_size, bias=False)
+        self.query = nn.Linear(n_embed, head_size, bias=False)
+        self.value = nn.Linear(n_embed, head_size, bias=False)
+        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        B, T, C = x.shape
+        K = self.key(x)   # (B, T, head_size)
+        Q = self.query(x) # (B, T, head_size)
+
+        wei = Q @ K.transpose(-2,-1) * C**-0.5 # (B, T, head_size) @ (B, head_size, T) -> (B, T, T)
+        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
+        wei = F.softmax(wei, dim=-1)
+        wei = self.dropout(wei)
+
+        V = self.value(x) # (B, T, head_size)
+        out = wei @ V # (B, T, T) @ (B, T, head_size) -> (B, T, head_size)
+        return out
+
+class MultiHeadAttention(nn.Module):
+    """a multi-head self attention layer"""
+
+    def __init__(self, n_embed, n_heads, head_size, block_size, dropout):
+        super().__init__()
+        self.heads = nn.ModuleList([AttentionHead(n_embed, head_size, block_size, dropout) for _ in range(n_heads)])
+        self.fc = nn.Linear(head_size * n_heads, n_embed)
+        self.dropout = nn.Dropout(dropout)
+
+    def forward(self, x):
+        out = torch.cat([h(x) for h in self.heads], dim=-1) # (B, T, n_heads*head_size)
+        out = self.fc(out) # (B, T, n_embed)
+        out = self.dropout(out)
+        return out
+
+class FeedForward(nn.Module):
+    def __init__(self, n_embed, n_hidden, dropout):
+        super().__init__()
+        self.net = nn.Sequential(
+            nn.Linear(n_embed, n_hidden),
+            nn.ReLU(),
+            nn.Linear(n_hidden, n_embed),
+            nn.Dropout(dropout)
+        )
+
+    def forward(self, x):
+        return self.net(x)
+
+class Block(nn.Module):
+    def __init__(self, n_embed, n_heads, block_size, dropout):
+        super().__init__()
+        self.sa_heads = MultiHeadAttention(n_embed, n_heads, n_embed // n_heads, block_size, dropout)
+        self.ffwd = FeedForward(n_embed, n_embed*4, dropout)
+        self.ln1 = nn.LayerNorm(n_embed)
+        self.ln2 = nn.LayerNorm(n_embed)
+
+
+    def forward(self, x):
+        x = x + self.sa_heads(self.ln1(x)) # [batch_size, block_size, n_embed]
+        x = x + self.ffwd(self.ln2(x)) # [batch_size, block_size, n_embed]
+        return x
+
+class NanoGPT(nn.Module):
+    def __init__(self, hyperparameters, device="cpu"):
+        super().__init__()
+
+        # hyperparameters
+        vocab_size = hyperparameters['vocab_size']
+        block_size = hyperparameters['block_size']
+        n_embed = hyperparameters['n_embed']
+        n_heads = hyperparameters['n_heads']
+        n_layers = hyperparameters['n_layers']
+        dropout = hyperparameters['dropout']
+
+        self.token_embedding_table = nn.Embedding(vocab_size, n_embed)
+        self.position_embedding_table = nn.Embedding(block_size, n_embed)
+        self.blocks = nn.Sequential(*[Block(n_embed, n_heads, block_size, dropout) for _ in range(n_layers)])
+        self.ln_f = nn.LayerNorm(n_embed)
+        self.lm_head = nn.Linear(n_embed, vocab_size)
+
+        self.device = device
+        self.block_size = block_size
+
+    def forward(self, idx, targets=None):
+        # idx and targets are both [batch_size, block_size]
+        B, T = idx.shape
+
+        tok_emb = self.token_embedding_table(idx) # [batch_size, block_size, n_embed]
+        pos_emb = self.position_embedding_table(torch.arange(T, device=self.device)) # [block_size, n_embed]
+        x = tok_emb + pos_emb # [batch_size, block_size, n_embed]
+        x = self.blocks(x)
+        x = self.ln_f(x)
+        logits = self.lm_head(x) # [batch_size, block_size, vocab_size]
+
+        if targets is None:
+            loss = None
+        else:
+            B, T, C = logits.shape
+            logits = logits.view(B*T, C)
+            targets = targets.view(B*T)
+            loss = F.cross_entropy(logits, targets)
+
+        return logits, loss
+        # return 0, 0
+
+    def generate(self, idx, max_new_tokens=100):
+        # idx is (B, T)
+        for _ in range(max_new_tokens):
+            # get the last block_size tokens
+            idx_cond = idx[:, -self.block_size:] # (B, T)
+            # get the predictions
+            logits, _ = self(idx_cond)
+            # focus only on the last time step
+            logits = logits[:, -1, :] # becomes (B, C)
+            # apply softmax to get probabilities
+            probs = F.softmax(logits, dim=1) # (B, C)
+            # sample from the distribution
+            idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
+            # append sampled index to the running sequence
+            idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
+
+        return idx
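
For reference, a minimal standalone usage sketch of the NanoGPT class added above. The hyperparameter values below are illustrative assumptions (only block_size=1080 and dropout=0.1 appear in config.json); they are not the checkpoint's actual configuration:

import torch
from nano_gpt_model import NanoGPT

# illustrative hyperparameters; vocab_size, n_embed, n_heads and n_layers are assumptions
hyperparameters = {
    'vocab_size': 4096,   # assumption
    'block_size': 1080,   # matches config.json
    'n_embed': 128,       # assumption (must be divisible by n_heads)
    'n_heads': 8,         # assumption
    'n_layers': 4,        # assumption
    'dropout': 0.1,       # matches config.json
}

model = NanoGPT(hyperparameters, device="cpu")

# dummy prompt: a single token id per sequence, shape (batch=1, time=1)
idx = torch.zeros((1, 1), dtype=torch.long)
out = model.generate(idx, max_new_tokens=20)  # -> shape (1, 21)
print(out.shape)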