Meeting 6 - Emmett Results
Cross-references
Source: Google Doc · Presented at Meeting #6
Context: Emmett used the Aster agentic loop to optimize Karpathy's pure-Python MicroGPT, reducing memory from 80 MB to 35 MB
Related: Meeting #6 full notes · Meeting #5 (Karpathy names task)
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
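# For the default names dataset the unique characters are 'a'..'z', so BOS == 26 and
# vocab_size == 27; e.g. "emma" becomes [4, 12, 12, 0], wrapped as [26, 4, 12, 12, 0, 26]
# once the training loop adds BOS on both sides.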
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
    __slots__ = ('data', 'grad', '_children', '_op') # fixed attribute set keeps per-node memory small
    def __init__(self, data, children=(), op=''):
        self.data, self.grad = data, 0
        self._children, self._op = children, op
    def __add__(self, other):
        other = other if isinstance(other, Value) else Value(other)
        return Value(self.data + other.data, (self, other), '+')
    def __mul__(self, other):
        other = other if isinstance(other, Value) else Value(other)
        return Value(self.data * other.data, (self, other), '*')
    def __pow__(self, other): return Value(self.data**other, (self,), f'p{other}') # exponent is recorded in the op tag
    def log(self): return Value(math.log(self.data), (self,), 'log')
    def exp(self): return Value(math.exp(self.data), (self,), 'exp')
    def relu(self): return Value(max(0, self.data), (self,), 'relu')
    def __neg__(self): return self * -1
    def __radd__(self, other): return self + other
    def __sub__(self, other): return self + (-other)
    def __rsub__(self, other): return other + (-self)
    def __rmul__(self, other): return self * other
    def __truediv__(self, other): return self * other**-1
    def __rtruediv__(self, other): return other * self**-1
    def backward(self):
        # iterative topological sort (avoids recursion depth limits on long graphs)
        topo, visited, stack = [], set(), [(self, False)]
        while stack:
            v, proc = stack.pop()
            if not isinstance(v, Value) or v in visited: continue
            if proc:
                visited.add(v); topo.append(v)
            else:
                stack.append((v, True))
                for c in v._children: stack.append((c, False))
        self.grad = 1
        for v in reversed(topo):
            if v._op == '+':
                v._children[0].grad += v.grad
                v._children[1].grad += v.grad
            elif v._op == '*':
                v._children[0].grad += v._children[1].data * v.grad
                v._children[1].grad += v._children[0].data * v.grad
            elif v._op == 'dot':
                n = len(v._children) // 2
                for i in range(n):
                    v._children[i].grad += v._children[n+i].data * v.grad
                    v._children[n+i].grad += v._children[i].data * v.grad
            elif v._op == 'log': v._children[0].grad += (1.0/v._children[0].data) * v.grad
            elif v._op == 'exp': v._children[0].grad += v.data * v.grad
            elif v._op == 'relu': v._children[0].grad += (v.data > 0) * v.grad
            elif v._op[:1] == 'p':
                p = float(v._op[1:])
                v._children[0].grad += (p * v._children[0].data**(p-1)) * v.grad
            v._children = () # Prune graph: drop child references so the backward graph is freed as we go
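# Quick sanity check of the engine (illustrative, not part of the original program):
#   a, b = Value(2.0), Value(3.0)
#   c = (a * b + a).relu()
#   c.backward()
#   assert a.grad == 4.0 and b.grad == 2.0 # dc/da = b + 1, dc/db = a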
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)] for _ in range(nout)]
state_dict = {
    'wte': matrix(vocab_size, n_embd), # token embedding table
    'wpe': matrix(block_size, n_embd), # position embedding table
    'lm_head': matrix(vocab_size, n_embd), # final classifier producing logits over the vocab
}
for i in range(n_layer):
    state_dict[f'layer{i}.attn_wq'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wk'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wv'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wo'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.mlp_fc1'] = matrix(4 * n_embd, n_embd)
    state_dict[f'layer{i}.mlp_fc2'] = matrix(n_embd, 4 * n_embd)
params = [p for mat in state_dict.values() for row in mat for p in row] # flatten params into a single list[Value]
print(f"num params: {len(params)}")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
    # fused dot product: a single Value node with a custom 'dot' backward rule, instead of ~2n nodes
    return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
    return [dot(wo, x) for wo in w]
def softmax(logits):
    max_val = max(val.data for val in logits) # subtract the max for numerical stability
    exps = [(val - max_val).exp() for val in logits]
    inv_total = sum(exps)**-1
    return [e * inv_total for e in exps]
def rmsnorm(x):
    ms = sum(xi * xi for xi in x) / len(x)
    scale = (ms + 1e-5) ** -0.5
    return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
    tok_emb = state_dict['wte'][token_id] # token embedding
    pos_emb = state_dict['wpe'][pos_id] # position embedding
    x = [t + p for t, p in zip(tok_emb, pos_emb)] # joint token and position embedding
    x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
    for li in range(n_layer):
        # 1) Multi-head Attention block
        x_residual = x
        x = rmsnorm(x)
        q = linear(x, state_dict[f'layer{li}.attn_wq'])
        k = linear(x, state_dict[f'layer{li}.attn_wk'])
        v = linear(x, state_dict[f'layer{li}.attn_wv'])
        keys[li].append(k)
        values[li].append(v)
        x_attn = []
        for h in range(n_head):
            hs = h * head_dim
            q_h = q[hs:hs+head_dim]
            k_h = [ki[hs:hs+head_dim] for ki in keys[li]]
            v_h = [vi[hs:hs+head_dim] for vi in values[li]]
            attn_logits = [dot(q_h, k_h[t]) / head_dim**0.5 for t in range(len(k_h))]
            attn_weights = softmax(attn_logits)
            head_out = [dot(attn_weights, [v_h[t][j] for t in range(len(v_h))]) for j in range(head_dim)]
            x_attn.extend(head_out)
        x = linear(x_attn, state_dict[f'layer{li}.attn_wo'])
        x = [a + b for a, b in zip(x, x_residual)]
        # 2) MLP block
        x_residual = x
        x = rmsnorm(x)
        x = linear(x, state_dict[f'layer{li}.mlp_fc1'])
        x = [xi.relu() for xi in x]
        x = linear(x, state_dict[f'layer{li}.mlp_fc2'])
        x = [a + b for a, b in zip(x, x_residual)]
    logits = linear(x, state_dict['lm_head'])
    return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0] * len(params) # first moment buffer
v = [0.0] * len(params) # second moment buffer
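# In equations, the per-parameter update applied in the training loop below is:
#   m_t = beta1*m_{t-1} + (1-beta1)*g,   v_t = beta2*v_{t-1} + (1-beta2)*g^2
#   p -= lr_t * (m_t/(1-beta1^t)) / (sqrt(v_t/(1-beta2^t)) + eps_adam)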
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
    # Take single document, tokenize it, surround it with BOS special token on both sides
    doc = docs[step % len(docs)]
    tokens = [BOS] + [uchars.index(ch) for ch in doc] + [BOS]
    n = min(block_size, len(tokens) - 1)
    # Forward through the model, accumulating the loss to keep the graph as light as possible
    keys, values = [[] for _ in range(n_layer)], [[] for _ in range(n_layer)]
    loss = Value(0.0)
    for pos_id in range(n):
        token_id, target_id = tokens[pos_id], tokens[pos_id + 1]
        logits = gpt(token_id, pos_id, keys, values)
        probs = softmax(logits)
        loss = loss + -probs[target_id].log()
    loss = (1 / n) * loss
    # Backward the loss, calculating the gradients with respect to all model parameters
    loss.backward()
    keys = values = None # drop the KV cache so its graph can be garbage-collected
    # Adam optimizer update: update the model parameters based on the corresponding gradients
    lr_t = learning_rate * (1 - step / num_steps) # linear learning rate decay to zero
    b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1) # bias-correction terms
    for i, p in enumerate(params):
        g = p.grad
        m[i] = beta1 * m[i] + (1 - beta1) * g
        v[i] = beta2 * v[i] + (1 - beta2) * g * g
        p.data -= lr_t * (m[i]/b1t) / ((v[i]/b2t)**0.5 + eps_adam)
        p.grad = 0
    print(f"step {step+1}/{num_steps} | loss {loss.data:.4f}", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1], controls the "creativity" of generated text, low to high
print("\n--- inference (new, hallucinated names) ---")
for sample_idx in range(20):
    keys, values = [[] for _ in range(n_layer)], [[] for _ in range(n_layer)]
    token_id = BOS
    sample = []
    for pos_id in range(block_size):
        logits = gpt(token_id, pos_id, keys, values)
        probs = softmax([l / temperature for l in logits])
        token_id = random.choices(range(vocab_size), weights=[p.data for p in probs])[0]
        if token_id == BOS:
            break
        sample.append(uchars[token_id])
    print(f"sample {sample_idx+1}: {''.join(sample)}")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)] for _ in range(nout)]
state_dict = {
    'wte': matrix(vocab_size, n_embd), # token embeddings, indexed by token id
    'wpe': matrix(block_size, n_embd), # position embeddings, indexed by position id
    'lm_head': matrix(vocab_size, n_embd), # final classifier over the vocabulary
}
for i in range(n_layer):
    state_dict[f'layer{i}.attn_wq'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wk'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wv'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.attn_wo'] = matrix(n_embd, n_embd)
    state_dict[f'layer{i}.mlp_fc1'] = matrix(4 * n_embd, n_embd)
    state_dict[f'layer{i}.mlp_fc2'] = matrix(n_embd, 4 * n_embd)
params = [p for mat in state_dict.values() for row in mat for p in row] # flatten params into a single list[Value]
print(f"num params: {len(params)}")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
    return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
    return [dot(wo, x) for wo in w]
def softmax(logits):
    max_val = max(val.data for val in logits)
    exps = [(val - max_val).exp() for val in logits]
    inv_total = sum(exps)**-1
    return [e * inv_total for e in exps]
def rmsnorm(x):
    ms = sum(xi * xi for xi in x) / len(x)
    scale = (ms + 1e-5) ** -0.5
    return [xi * scale for xi in x]
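# Note: dot() is the main graph-size trick: a length-n dot product becomes a single
# 'dot' node with 2n children rather than ~2n intermediate Values (n multiplies plus a
# chain of adds), so the attention and linear layers below build a far smaller autograd
# graph, which is presumably where much of the memory saving comes from.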
def gpt(token_id, pos_id, keys, values):
    tok_emb = state_dict['wte'][token_id] # token embedding
    pos_emb = state_dict['wpe'][pos_id] # position embedding
    x = [t + p for t, p in zip(tok_emb, pos_emb)] # joint token and position embedding
    x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
    for li in range(n_layer):
        # 1) Multi-head Attention block
        x_residual = x
        x = rmsnorm(x)
        q = linear(x, state_dict[f'layer{li}.attn_wq'])
        k = linear(x, state_dict[f'layer{li}.attn_wk'])
        v = linear(x, state_dict[f'layer{li}.attn_wv'])
        keys[li].append(k)
        values[li].append(v)
        x_attn = []
        for h in range(n_head):
            hs = h * head_dim
            q_h = q[hs:hs+head_dim]
            k_h = [ki[hs:hs+head_dim] for ki in keys[li]]
            v_h = [vi[hs:hs+head_dim] for vi in values[li]]
            attn_logits = [dot(q_h, k_h[t]) / head_dim**0.5 for t in range(len(k_h))]
            attn_weights = softmax(attn_logits)
            head_out = [dot(attn_weights, [v_h[t][j] for t in range(len(v_h))]) for j in range(head_dim)]
            x_attn.extend(head_out)
        x = linear(x_attn, state_dict[f'layer{li}.attn_wo'])
        x = [a + b for a, b in zip(x, x_residual)]
        # 2) MLP block
        x_residual = x
        x = rmsnorm(x)
        x = linear(x, state_dict[f'layer{li}.mlp_fc1'])
        x = [xi.relu() for xi in x]
        x = linear(x, state_dict[f'layer{li}.mlp_fc2'])
        x = [a + b for a, b in zip(x, x_residual)]
    logits = linear(x, state_dict['lm_head'])
    return logits
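# Each call scores one token at one position and appends to the per-layer KV caches, e.g.:
#   keys, values = [[] for _ in range(n_layer)], [[] for _ in range(n_layer)]
#   logits = gpt(BOS, 0, keys, values) # vocab_size raw scores for the next token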
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0] * len(params) # first moment buffer
v = [0.0] * len(params) # second moment buffer
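# The update applied in the training loop below is the standard bias-corrected Adam step:
#   m[i] <- beta1*m[i] + (1-beta1)*g      v[i] <- beta2*v[i] + (1-beta2)*g^2
#   p.data -= lr_t * (m[i]/(1-beta1^t)) / (sqrt(v[i]/(1-beta2^t)) + eps_adam)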
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
    # Take a single document, tokenize it, surround it with the BOS special token on both sides
    doc = docs[step % len(docs)]
    tokens = [BOS] + [uchars.index(ch) for ch in doc] + [BOS]
    n = min(block_size, len(tokens) - 1)
    # Forward through the model, accumulating the loss to keep the graph as light as possible
    keys, values = [[] for _ in range(n_layer)], [[] for _ in range(n_layer)]
    loss = Value(0.0)
    for pos_id in range(n):
        token_id, target_id = tokens[pos_id], tokens[pos_id + 1]
        logits = gpt(token_id, pos_id, keys, values)
        probs = softmax(logits)
        loss = loss + -probs[target_id].log()
    loss = (1 / n) * loss # mean negative log-likelihood over the n predicted positions
    # Backward the loss, calculating the gradients with respect to all model parameters
    loss.backward()
    keys = values = None # release the per-step KV caches
    # Adam optimizer update: update the model parameters based on the corresponding gradients
    lr_t = learning_rate * (1 - step / num_steps) # linear learning rate decay to zero
    b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
    for i, p in enumerate(params):
        g = p.grad
        m[i] = beta1 * m[i] + (1 - beta1) * g
        v[i] = beta2 * v[i] + (1 - beta2) * g * g
        p.data -= lr_t * (m[i]/b1t) / ((v[i]/b2t)**0.5 + eps_adam)
        p.grad = 0
    print(f"step {step+1}/{num_steps} | loss {loss.data:.4f}", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1], controls the "creativity" of generated text, low to high
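# Dividing every logit by a temperature below 1 sharpens the softmax, so sampling at 0.5
# favors likely characters and produces more conservative (less random) names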
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
    keys, values = [[] for _ in range(n_layer)], [[] for _ in range(n_layer)]
    token_id = BOS
    sample = []
    for pos_id in range(block_size):
        logits = gpt(token_id, pos_id, keys, values)
        probs = softmax([l / temperature for l in logits])
        token_id = random.choices(range(vocab_size), weights=[p.data for p in probs])[0]
        if token_id == BOS:
            break
        sample.append(uchars[token_id])
    print(f"sample {sample_idx+1}: {''.join(sample)}")
Best,
Emmett
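For anyone re-running the memory comparison, one simple cross-check is Python's built-in tracemalloc. The sketch below assumes the baseline and optimized versions are saved as microgpt_original.py and microgpt_optimized.py (hypothetical filenames) and reports each script's peak Python-level allocation. tracemalloc only counts allocations made through Python's allocator, and the notes don't say how the 80 MB / 35 MB figures were measured, so treat this as a rough comparison rather than an exact reproduction.

import runpy
import sys
import tracemalloc

def peak_mb(script_path):
    # Run the script in-process and return its peak traced allocation in MB
    tracemalloc.start()
    runpy.run_path(script_path, run_name='__main__')
    _, peak = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    return peak / (1024 * 1024)

if __name__ == '__main__':
    # e.g.: python measure_mem.py microgpt_original.py microgpt_optimized.py
    for path in sys.argv[1:]:
        print(f"{path}: peak ~{peak_mb(path):.1f} MB")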
\"Aster for 8 iterations, minimizing memory, and got this program which takes up 35 MB instead of the original 80 MB. I recommend trying out a few simpler mechanims than Aster to compare, like running Codex in a loop or OpenEvolve. Seems to mainly be making optimizations rather than fundamentally changing the algorithm though.\"
\"\"\"
The most atomic way to train and run inference for a GPT in pure, dependency-free Python.
This file is the complete algorithm.
Everything else is just efficiency.
\@karpathy
\"\"\"
import os # os.path.exists
import math # math.log, math.exp
import random # random.seed, random.choices, random.gauss, random.shuffle
random.seed(42) # Let there be order among chaos
# Let there be a Dataset `docs`: list[str\ of documents (e.g. a list of names)]
if not os.path.exists('input.txt'):
import urllib.request
names_url = '[https://raw.githubusercontent.com/karpathy/makemore/988aa59/names.txt]'
urllib.request.urlretrieve(names_url, 'input.txt')
docs = [line.strip() for line in open('input.txt') if line.strip()]
random.shuffle(docs)
print(f\"num docs: \")
# Let there be a Tokenizer to translate strings to sequences of integers (\"tokens\") and back
uchars = sorted(set(''.join(docs))) # unique characters in the dataset become token ids 0..n-1
BOS = len(uchars) # token id for a special Beginning of Sequence (BOS) token
vocab_size = len(uchars) + 1 # total number of unique tokens, +1 is for BOS
print(f\"vocab size: \")
# Let there be Autograd to recursively apply the chain rule through a computation graph
class Value:
__slots__ = ('data', 'grad', '_children', '_op')
def __init__(self, data, children=(), op=''):
self.data, self.grad = data, 0
self._children, self._op = children, op
def __add__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data + other.data, (self, other), '+')
def __mul__(self, other):
other = other if isinstance(other, Value) else Value(other)
return Value(self.data * other.data, (self, other), '*')
def __pow__(self, other): return Value(self.data**other, (self,), f'p')
def log(self): return Value(math.log(self.data), (self,), 'log')
def exp(self): return Value(math.exp(self.data), (self,), 'exp')
def relu(self): return Value(max(0, self.data), (self,), 'relu')
def __neg__(self): return self * -1
def __radd__(self, other): return self + other
def __sub__(self, other): return self + (-other)
def __rsub__(self, other): return other + (-self)
def __rmul__(self, other): return self * other
def __truediv__(self, other): return self * other**-1
def __rtruediv__(self, other): return other * self**-1
def backward(self):
topo, visited, stack = [\, set(), (self, False)]
while stack:
v, proc = stack.pop()
if not isinstance(v, Value) or v in visited: continue
if proc:
visited.add(v); topo.append(v)
else:
stack.append((v, True))
for c in v._children: stack.append((c, False))
self.grad = 1
for v in reversed(topo):
if v._op == '+':
v._children[0.grad += v.grad]
v._children[1.grad += v.grad]
elif v._op == '*':
v._children[0.grad += v._children\1.data * v.grad]
v._children[1.grad += v._children\0.data * v.grad]
elif v._op == 'dot':
n = len(v._children) // 2
for i in range(n):
v._children[i.grad += v._children\n+i.data * v.grad]
v._children[n+i.grad += v._children\i.data * v.grad]
elif v._op == 'log': v._children[0.grad += (1.0/v._children\0.data) * v.grad]
elif v._op == 'exp': v._children[0.grad += v.data * v.grad]
elif v._op == 'relu': v._children[0.grad += (v.data > 0) * v.grad]
elif v._op[:1\ == 'p':]
p = float(v._op[1:)]
v._children[0.grad += (p * v._children\0.data**(p-1)) * v.grad]
v._children = () # Prune graph
# Initialize the parameters, to store the knowledge of the model
n_layer = 1 # depth of the transformer neural network (number of layers)
n_embd = 16 # width of the network (embedding dimension)
block_size = 16 # maximum context length of the attention window (note: the longest name is 15 characters)
n_head = 4 # number of attention heads
head_dim = n_embd // n_head # derived dimension of each head
matrix = lambda nout, nin, std=0.08: [[Value(random.gauss(0, std)) for _ in range(nin)\ for _ in range(nout)]]
state_dict =
for i in range(n_layer):
state_dict[f'layer.attn_wq'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wk'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wv'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.attn_wo'\ = matrix(n_embd, n_embd)]
state_dict[f'layer.mlp_fc1'\ = matrix(4 * n_embd, n_embd)]
state_dict[f'layer.mlp_fc2'\ = matrix(n_embd, 4 * n_embd)]
params = [p for mat in state_dict.values() for row in mat for p in row\ # flatten params into a single list\Value]
print(f\"num params: \")
# Define the model architecture: a function mapping tokens and parameters to logits over what comes next
# Follow GPT-2, blessed among the GPTs, with minor differences: layernorm -> rmsnorm, no biases, GeLU -> ReLU
def dot(v1, v2):
return Value(sum(x.data * y.data for x, y in zip(v1, v2)), tuple(v1) + tuple(v2), 'dot')
def linear(x, w):
return [dot(wo, x) for wo in w]
def softmax(logits):
max_val = max(val.data for val in logits)
exps = [(val - max_val).exp() for val in logits]
inv_total = sum(exps)**-1
return [e * inv_total for e in exps]
def rmsnorm(x):
ms = sum(xi * xi for xi in x) / len(x)
scale = (ms + 1e-5) ** -0.5
return [xi * scale for xi in x]
def gpt(token_id, pos_id, keys, values):
tok_emb = state_dict['wte'\token_id\ # token embedding]
pos_emb = state_dict['wpe'\pos_id\ # position embedding]
x = [t + p for t, p in zip(tok_emb, pos_emb)\ # joint token and position embedding]
x = rmsnorm(x) # note: not redundant due to backward pass via the residual connection
for li in range(n_layer):
# 1) Multi-head Attention block
x_residual = x
x = rmsnorm(x)
q = linear(x, state_dict[f'layer.attn_wq')]
k = linear(x, state_dict[f'layer.attn_wk')]
v = linear(x, state_dict[f'layer.attn_wv')]
keys[li.append(k)]
values[li.append(v)]
x_attn = []
for h in range(n_head):
hs = h * head_dim
q_h = q[hs:hs+head_dim]
k_h = [ki[hs:hs+head_dim\ for ki in keys\li\]]
v_h = [vi[hs:hs+head_dim\ for vi in values\li\]]
attn_logits = [dot(q_h, k_h[t) / head_dim**0.5 for t in range(len(k_h))]]
attn_weights = softmax(attn_logits)
head_out = [dot(attn_weights, [v_h[t\j\ for t in range(len(v_h))]) for j in range(head_dim)]]
x_attn.extend(head_out)
x = linear(x_attn, state_dict[f'layer.attn_wo')]
x = [a + b for a, b in zip(x, x_residual)]
# 2) MLP block
x_residual = x
x = rmsnorm(x)
x = linear(x, state_dict[f'layer.mlp_fc1')]
x = [xi.relu() for xi in x]
x = linear(x, state_dict[f'layer.mlp_fc2')]
x = [a + b for a, b in zip(x, x_residual)]
logits = linear(x, state_dict['lm_head')]
return logits
# Let there be Adam, the blessed optimizer and its buffers
learning_rate, beta1, beta2, eps_adam = 0.01, 0.85, 0.99, 1e-8
m = [0.0\ * len(params) # first moment buffer]
v = [0.0\ * len(params) # second moment buffer]
# Repeat in sequence
num_steps = 1000 # number of training steps
for step in range(num_steps):
# Take single document, tokenize it, surround it with BOS special token on both sides
doc = docs[step % len(docs)]
tokens = [BOS\ + \uchars.index(ch) for ch in doc\ + \BOS]
n = min(block_size, len(tokens) - 1)
# Forward through the model, accumulating the loss to keep the graph as light as possible
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
loss = Value(0.0)
for pos_id in range(n):
token_id, target_id = tokens[pos_id\, tokens\pos_id + 1]
logits = gpt(token_id, pos_id, keys, values)
probs = softmax(logits)
loss = loss + -probs[target_id.log()]
loss = (1 / n) * loss
# Backward the loss, calculating the gradients with respect to all model parameters
loss.backward()
keys = values = None
# Adam optimizer update: update the model parameters based on the corresponding gradients
lr_t = learning_rate * (1 - step / num_steps)
b1t, b2t = 1 - beta1**(step + 1), 1 - beta2**(step + 1)
for i, p in enumerate(params):
g = p.grad
m[i\ = beta1 * m\i\ + (1 - beta1) * g]
v[i\ = beta2 * v\i\ + (1 - beta2) * g * g]
p.data -= lr_t * (m[i\/b1t) / ((v\i\/b2t)**0.5 + eps_adam)]
p.grad = 0
print(f\"step / | loss \", end='\r')
# Inference: may the model babble back to us
temperature = 0.5 # in (0, 1\, control the \"creativity\" of generated text, low to high]
print(\"\n--- inference (new, hallucinated names) ---\")
for sample_idx in range(20):
keys, values = [[\ for _ in range(n_layer)], \[\ for _ in range(n_layer)]]
token_id = BOS
sample = []
for pos_id in range(block_size):
logits = gpt(token_id, pos_id, keys, values)
probs = softmax([l / temperature for l in logits)]
token_id = random.choices(range(vocab_size), weights=[p.data for p in probs)\0]
if token_id == BOS:
break
sample.append(uchars[token_id)]
print(f\"sample : \")
Best,
Emmett