A documented and unit-tested educational deep learning framework, with autograd built from scratch.
- `neuralforge/`: the framework's Python source files.
- `neuralforge/tensor_operations.py`: the `Tensor` class and all of the tensor operations.
- `neuralforge/utils.py`: operations and helper functions.
- `neuralforge/nn/`: submodule of the framework; contains the layers and optimizers.
- `neuralforge/nn/nn.py`: most deep learning layers, and the `nn.Module` class.
- `neuralforge/nn/optim.py`: the optimizers.
- `data/`: folder to store training data. Currently holds `shakespeare.txt`.
- `test/`: folder with unit tests. Contains `test_framework.py`.
- `setup.py`: setup file for the framework.
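The unit tests in `test/test_framework.py` can be run from the repository root with Python's standard test runner, e.g. `python -m unittest discover -s test` (assuming the tests are written with the built-in `unittest` module).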
A simple autograd example:

```python
import neuralforge as forge

# Instantiate Tensors:
x = forge.randn((8,4,5))
w = forge.randn((8,5,4), requires_grad=True)
b = forge.randint((4), requires_grad=True)

# Make calculations:
out = x @ w
out += b

# Compute gradients on whole graph:
out.backward()

# Get gradients from specific Tensors:
print(w.grad)
print(b.grad)
```
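For readers curious about what `backward()` does under the hood, below is a minimal, framework-independent sketch of reverse-mode autograd in the same spirit. This is not NeuralForge's actual implementation; the `Scalar` class and the `_parents` / `_backward` attributes are illustrative names only.

```python
# Minimal reverse-mode autograd sketch (illustrative only, not NeuralForge's code).
class Scalar:
    def __init__(self, data, _parents=()):
        self.data = data
        self.grad = 0.0
        self._parents = _parents       # nodes this one was computed from
        self._backward = lambda: None  # propagates this node's grad to its parents

    def __add__(self, other):
        out = Scalar(self.data + other.data, (self, other))
        def _backward():
            # d(out)/d(self) = 1 and d(out)/d(other) = 1
            self.grad += out.grad
            other.grad += out.grad
        out._backward = _backward
        return out

    def __mul__(self, other):
        out = Scalar(self.data * other.data, (self, other))
        def _backward():
            # Product rule: each parent receives the other parent's value times out.grad
            self.grad += other.data * out.grad
            other.grad += self.data * out.grad
        out._backward = _backward
        return out

    def backward(self):
        # Topologically sort the graph, then apply the chain rule in reverse order:
        order, visited = [], set()
        def visit(node):
            if node not in visited:
                visited.add(node)
                for p in node._parents:
                    visit(p)
                order.append(node)
        visit(self)
        self.grad = 1.0
        for node in reversed(order):
            node._backward()

# Usage: gradients of z = x * y + y with respect to x and y
x, y = Scalar(2.0), Scalar(3.0)
z = x * y + y
z.backward()
print(x.grad, y.grad)  # 3.0 and 3.0 (dz/dx = y = 3, dz/dy = x + 1 = 3)
```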
Example: implementing and training a Transformer on the tiny Shakespeare dataset:

```python
import neuralforge as forge
import neuralforge.nn as nn
from neuralforge.nn import optim  # optimizers live in neuralforge/nn/optim.py

# Implement Transformer class inheriting from forge.nn.Module:
class Transformer(nn.Module):
    def __init__(self, vocab_size: int, hidden_size: int, n_timesteps: int, n_heads: int, p: float):
        super().__init__()
        # Instantiate Transformer's Layers:
        self.embed = nn.Embedding(vocab_size, hidden_size)
        self.pos_embed = nn.PositionalEmbedding(n_timesteps, hidden_size)
        self.b1 = nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_prob=p)
        self.b2 = nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_prob=p)
        self.ln = nn.LayerNorm(hidden_size)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, x):
        z = self.embed(x) + self.pos_embed(x)
        z = self.b1(z)
        z = self.b2(z)
        z = self.ln(z)
        z = self.linear(z)
        return z

# Get tiny Shakespeare test data:
text = load_text_data(f'{PATH}/data/shakespeare.txt')

# Create Transformer instance (hyperparameters such as vocab_size, hidden_size,
# n_timesteps, n_heads, dropout_p, batch_size and n_iters are assumed to be defined):
model = Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p)

# Define loss function and optimizer:
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01, reg=0)

# Training Loop:
for _ in range(n_iters):
    x, y = get_batch(test_data, n_timesteps, batch_size)

    z = model.forward(x)

    # Get loss:
    loss = loss_func(z, y)

    # Backpropagate the loss using forge.tensor's backward() method:
    loss.backward()

    # Update the weights:
    optimizer.step()

    # Reset the gradients to zero after each training step:
    optimizer.zero_grad()
```
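The snippet above relies on `load_text_data` and `get_batch`, which are not defined in it (helper functions of this kind would plausibly live in `neuralforge/utils.py`), and it presumably derives `test_data` from the loaded `text` somewhere not shown. A rough sketch of what such helpers might look like, assuming character-level tokenization and NumPy arrays of integer token ids:

```python
import numpy as np

# Hypothetical sketch of the helper functions used above; the real ones may differ.
def load_text_data(path: str) -> np.ndarray:
    with open(path, 'r', encoding='utf-8') as f:
        text = f.read()
    chars = sorted(set(text))
    stoi = {ch: i for i, ch in enumerate(chars)}
    # Encode the whole corpus as an array of integer token ids:
    return np.array([stoi[ch] for ch in text], dtype=np.int64)

def get_batch(data: np.ndarray, n_timesteps: int, batch_size: int):
    # Sample random starting positions for each sequence in the batch:
    starts = np.random.randint(0, len(data) - n_timesteps - 1, size=batch_size)
    x = np.stack([data[s : s + n_timesteps] for s in starts])
    # Targets are the same sequences shifted one step ahead:
    y = np.stack([data[s + 1 : s + n_timesteps + 1] for s in starts])
    return x, y
```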
Note: You can install the framework locally with:

```bash
pip install neuralforge
```

The dependencies are listed in `requirements.txt` and can be installed with:

```bash
pip install -r requirements.txt
```

Note: The framework is built around NumPy, so there is no CUDA availability.