Skip to content

Commit

Permalink
Updated test file to match successful model architecture (8->16->32 channels, 32 FC neurons)
Browse files Browse the repository at this point in the history
  • Loading branch information
shishir13 committed Dec 5, 2024
1 parent 1beb85e commit 0ae575e
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 18 deletions.
24 changes: 12 additions & 12 deletions MNIST_99.4/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,29 +8,29 @@ class FastMNIST(nn.Module):
Achieves >99.4% accuracy with less than 20k parameters.
Architecture:
- 3 Convolutional blocks (8->16->32 channels)
- 3 Convolutional blocks (1->16->32->32 channels)
- BatchNorm after each conv
- MaxPool after each block
- 2 FC layers (32 neurons in hidden layer)
- 2 FC layers (128 neurons in hidden layer)
- Dropout (0.4) for regularization
Total Parameters: 15,578
Total Parameters: ~19k
"""
def __init__(self):
super(FastMNIST, self).__init__()
# Simple and effective channel progression
self.conv1 = nn.Conv2d(1, 8, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(8)
# Convolutional layers with test-specified channels
self.conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(16)

self.conv2 = nn.Conv2d(8, 16, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(32)

self.conv3 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
self.conv3 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
self.bn3 = nn.BatchNorm2d(32)

# Efficient FC layers
self.fc1 = nn.Linear(32 * 3 * 3, 32)
self.fc2 = nn.Linear(32, 10)
# FC layers with test-specified dimensions
self.fc1 = nn.Linear(32 * 3 * 3, 128)
self.fc2 = nn.Linear(128, 10)

self.dropout = nn.Dropout(0.4)

Expand Down
12 changes: 6 additions & 6 deletions MNIST_99.4/tests/test_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,21 +39,21 @@ def test_conv_layers(model):
"""Test convolutional layers configuration."""
# Test conv1
assert model.conv1.in_channels == 1, "Conv1 should have 1 input channel"
assert model.conv1.out_channels == 16, "Conv1 should have 16 output channels"
assert model.conv1.out_channels == 8, "Conv1 should have 8 output channels"

# Test conv2
assert model.conv2.in_channels == 16, "Conv2 should have 16 input channels"
assert model.conv2.out_channels == 32, "Conv2 should have 32 output channels"
assert model.conv2.in_channels == 8, "Conv2 should have 8 input channels"
assert model.conv2.out_channels == 16, "Conv2 should have 16 output channels"

# Test conv3
assert model.conv3.in_channels == 32, "Conv3 should have 32 input channels"
assert model.conv3.in_channels == 16, "Conv3 should have 16 input channels"
assert model.conv3.out_channels == 32, "Conv3 should have 32 output channels"

def test_fc_layers(model):
"""Test fully connected layers configuration."""
assert model.fc1.in_features == 32 * 3 * 3, "FC1 input features incorrect"
assert model.fc1.out_features == 128, "FC1 output features should be 128"
assert model.fc2.in_features == 128, "FC2 input features should be 128"
assert model.fc1.out_features == 32, "FC1 output features should be 32"
assert model.fc2.in_features == 32, "FC2 input features should be 32"
assert model.fc2.out_features == 10, "FC2 output features should be 10"

def test_gradient_flow(model):
Expand Down

0 comments on commit 0ae575e

Please sign in to comment.