task 3 with wandb #27

Open
wants to merge 1 commit into main
10 changes: 10 additions & 0 deletions plr_exercise.egg-info/PKG-INFO
@@ -0,0 +1,10 @@
Metadata-Version: 2.1
Name: plr_exercise
Version: 1.0.0
Summary: A small example package
Author: Jonas Frey
Author-email: [email protected]
Requires-Python: >=3.7
License-File: LICENSE
Requires-Dist: numpy
Requires-Dist: torch>=1.21
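
The PKG-INFO above is metadata that setuptools generates from the package's setup.py; the plr_exercise.egg-info/ directory is rebuilt on every install and is usually gitignored rather than committed. A minimal sketch of a setup.py that would produce this metadata (the actual setup.py is not shown in this diff, so the exact arguments are an assumption):

    # Hypothetical reconstruction; the real setup.py is not part of this diff.
    from setuptools import setup, find_packages

    setup(
        name="plr_exercise",
        version="1.0.0",
        description="A small example package",
        author="Jonas Frey",
        author_email="[email protected]",  # shown redacted in the PR metadata
        python_requires=">=3.7",
        install_requires=["numpy", "torch>=1.21"],
        packages=find_packages(),
    )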
9 changes: 9 additions & 0 deletions plr_exercise.egg-info/SOURCES.txt
@@ -0,0 +1,9 @@
LICENSE
README.md
setup.py
plr_exercise/__init__.py
plr_exercise.egg-info/PKG-INFO
plr_exercise.egg-info/SOURCES.txt
plr_exercise.egg-info/dependency_links.txt
plr_exercise.egg-info/requires.txt
plr_exercise.egg-info/top_level.txt
1 change: 1 addition & 0 deletions plr_exercise.egg-info/dependency_links.txt
@@ -0,0 +1 @@
https://download.pytorch.org/whl/torch-2.1.0+cu121-cp38-cp38-linux_x86_64.whl
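
A dependency_links entry like this pins a direct CUDA wheel URL, but modern pip ignores dependency_links entirely. The usual equivalent, assuming the cu121 build is what is wanted, is to point pip at the PyTorch wheel index instead: pip install torch==2.1.0+cu121 --index-url https://download.pytorch.org/whl/cu121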
2 changes: 2 additions & 0 deletions plr_exercise.egg-info/requires.txt
@@ -0,0 +1,2 @@
numpy
torch>=1.21
1 change: 1 addition & 0 deletions plr_exercise.egg-info/top_level.txt
@@ -0,0 +1 @@
plr_exercise
59 changes: 41 additions & 18 deletions train.py
@@ -6,15 +6,25 @@
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
+import wandb
+
+
+run = wandb.init(
+    project="mon projet",
+    config={
+        "learning_rate": 0.01,
+        "epochs": 10,
+    },
+)


class Net(nn.Module):
def __init__(self):


super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
-        self.conv2 = nn.Conv2d( 32, 64, 3, 1)
+        self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
@@ -43,7 +53,7 @@ def train(args, model, device, train_loader, optimizer, epoch):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
-        loss = F.nll_loss( output, target)
+        loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
@@ -52,7 +62,7 @@ def train(args, model, device, train_loader, optimizer, epoch):
epoch,
batch_idx * len(data),
len(train_loader.dataset),
                    100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
@@ -70,17 +80,21 @@ def test(model, device, test_loader, epoch):

data, target = data.to(device), target.to(device)
output = model(data)
-            test_loss += F.nll_loss( output, target, reduction="sum").item()  # sum up batch loss
-            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
-            correct += pred.eq(target.view_as(pred) ).sum().item()
+            # sum up batch loss
+            test_loss += F.nll_loss(output, target, reduction="sum").item()
+            # get the index of the max log-probability
+            pred = output.argmax(dim=1, keepdim=True)
+            correct += pred.eq(target.view_as(pred)).sum().item()

-    test_loss /= len(test_loader.dataset )
+    test_loss /= len(test_loader.dataset)

print(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
-            test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)
+            test_loss, correct, len(
+                test_loader.dataset), 100.0 * correct / len(test_loader.dataset)
)
)
+    wandb.log({"Loss": test_loss, "epoch": epoch})


def main():
@@ -92,20 +106,27 @@ def main():
parser.add_argument(
"--test-batch-size", type=int, default=1000, metavar="N", help="input batch size for testing (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=2, metavar="N", help="number of epochs to train (default: 14)")
parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument("--gamma", type=float, default=0.7, metavar="M", help="Learning rate step gamma (default: 0.7)")
parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training")
parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument("--epochs", type=int, default=2, metavar="N",
help="number of epochs to train (default: 14)")
parser.add_argument("--lr", type=float, default=1.0,
metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument("--gamma", type=float, default=0.7,
metavar="M", help="Learning rate step gamma (default: 0.7)")
parser.add_argument("--no-cuda", action="store_true",
default=False, help="disables CUDA training")
parser.add_argument("--dry-run", action="store_true",
default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=1,
metavar="S", help="random seed (default: 1)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--save-model", action="store_true", default=False, help="For Saving the current Model")
parser.add_argument("--save-model", action="store_true",
default=False, help="For Saving the current Model")
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()

@@ -123,8 +144,10 @@ def main():
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)

-    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
-    dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
+    transform = transforms.Compose(
+        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+    dataset1 = datasets.MNIST("../data", train=True,
+                              download=True, transform=transform)
dataset2 = datasets.MNIST("../data", train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
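
The train.py changes boil down to one pattern: create a run once with wandb.init(), log the test loss each epoch with wandb.log(), and close the run at the end. A self-contained sketch of that pattern (the loss values here are placeholders, not from a real run):

    import wandb

    # Initialize once; config values mirror the ones hard-coded in the diff.
    run = wandb.init(project="mon projet",
                     config={"learning_rate": 0.01, "epochs": 10})

    for epoch in range(run.config["epochs"]):
        test_loss = 2.3 - 0.01 * epoch  # placeholder for the real evaluation loss
        wandb.log({"Loss": test_loss, "epoch": epoch})

    run.finish()  # flush and close the run explicitly

Note that the diff calls wandb.init() at import time with a hard-coded config, while main() parses its own --lr and --epochs arguments with different defaults; moving the init into main() after parse_args() would keep the two in sync.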
1 change: 1 addition & 0 deletions wandb/debug-internal.log
1 change: 1 addition & 0 deletions wandb/debug.log
1 change: 1 addition & 0 deletions wandb/latest-run
35 changes: 35 additions & 0 deletions wandb/run-20240306_104616-pufbvux4/files/config.yaml
@@ -0,0 +1,35 @@
wandb_version: 1

learning_rate:
desc: null
value: 0.01
epochs:
desc: null
value: 10
_wandb:
desc: null
value:
python_version: 3.11.5
cli_version: 0.16.4
framework: torch
is_jupyter_run: false
is_kaggle_kernel: false
start_time: 1709718376.0
t:
1:
- 1
- 41
- 55
2:
- 1
- 41
- 55
3:
- 16
- 23
4: 3.11.5
5: 0.16.4
8:
- 4
- 5
13: darwin-arm64
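
This config.yaml records only the two keys passed to wandb.init() (learning_rate 0.01, epochs 10), which do not match the argparse values the script actually trains with (lr 1.0, epochs 2 by default). A sketch of one way to keep them consistent, assuming wandb.init() is moved into main() after argument parsing:

    import argparse
    import wandb

    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, default=1.0)
    parser.add_argument("--epochs", type=int, default=2)
    args = parser.parse_args()

    # Passing the parsed values makes the saved config.yaml reflect
    # what the run actually used.
    run = wandb.init(project="mon projet", config=vars(args))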
190 changes: 190 additions & 0 deletions wandb/run-20240306_104616-pufbvux4/files/output.log
@@ -0,0 +1,190 @@
Train Epoch: 0 [0/60000 (0%)] Loss: 2.329474
Train Epoch: 0 [640/60000 (1%)] Loss: 3.163749
Train Epoch: 0 [1280/60000 (2%)] Loss: 2.810723
Train Epoch: 0 [1920/60000 (3%)] Loss: 2.462003
Train Epoch: 0 [2560/60000 (4%)] Loss: 2.393042
Train Epoch: 0 [3200/60000 (5%)] Loss: 727.100647
Train Epoch: 0 [3840/60000 (6%)] Loss: 2.365071
Train Epoch: 0 [4480/60000 (7%)] Loss: 2.351858
Train Epoch: 0 [5120/60000 (9%)] Loss: 2.322984
Train Epoch: 0 [5760/60000 (10%)] Loss: 2.334341
Train Epoch: 0 [6400/60000 (11%)] Loss: 2.350302
Train Epoch: 0 [7040/60000 (12%)] Loss: 2.328267
Train Epoch: 0 [7680/60000 (13%)] Loss: 2.351661
Train Epoch: 0 [8320/60000 (14%)] Loss: 2.312091
Train Epoch: 0 [8960/60000 (15%)] Loss: 2.401574
Train Epoch: 0 [9600/60000 (16%)] Loss: 2.336918
Train Epoch: 0 [10240/60000 (17%)] Loss: 2.294845
Train Epoch: 0 [10880/60000 (18%)] Loss: 2.343280
Train Epoch: 0 [11520/60000 (19%)] Loss: 2.362715
Train Epoch: 0 [12160/60000 (20%)] Loss: 2.298522
Train Epoch: 0 [12800/60000 (21%)] Loss: 2.286546
Train Epoch: 0 [13440/60000 (22%)] Loss: 2.339040
Train Epoch: 0 [14080/60000 (23%)] Loss: 2.336880
Train Epoch: 0 [14720/60000 (25%)] Loss: 2.348043
Train Epoch: 0 [15360/60000 (26%)] Loss: 2.330961
Train Epoch: 0 [16000/60000 (27%)] Loss: 2.320678
Train Epoch: 0 [16640/60000 (28%)] Loss: 2.373642
Train Epoch: 0 [17280/60000 (29%)] Loss: 2.362350
Train Epoch: 0 [17920/60000 (30%)] Loss: 2.285732
Train Epoch: 0 [18560/60000 (31%)] Loss: 2.387622
Train Epoch: 0 [19200/60000 (32%)] Loss: 2.334290
Train Epoch: 0 [19840/60000 (33%)] Loss: 2.351022
Train Epoch: 0 [20480/60000 (34%)] Loss: 2.451580
Train Epoch: 0 [21120/60000 (35%)] Loss: 2.432407
Train Epoch: 0 [21760/60000 (36%)] Loss: 2.357752
Train Epoch: 0 [22400/60000 (37%)] Loss: 2.384685
Train Epoch: 0 [23040/60000 (38%)] Loss: 2.421635
Train Epoch: 0 [23680/60000 (39%)] Loss: 2.375398
Train Epoch: 0 [24320/60000 (41%)] Loss: 2.312469
Train Epoch: 0 [24960/60000 (42%)] Loss: 2.293869
Train Epoch: 0 [25600/60000 (43%)] Loss: 2.306481
Train Epoch: 0 [26240/60000 (44%)] Loss: 2.339118
Train Epoch: 0 [26880/60000 (45%)] Loss: 2.329201
Train Epoch: 0 [27520/60000 (46%)] Loss: 2.402405
Train Epoch: 0 [28160/60000 (47%)] Loss: 2.362206
Train Epoch: 0 [28800/60000 (48%)] Loss: 2.346026
Train Epoch: 0 [29440/60000 (49%)] Loss: 2.359084
Train Epoch: 0 [30080/60000 (50%)] Loss: 2.382750
Train Epoch: 0 [30720/60000 (51%)] Loss: 2.302515
Train Epoch: 0 [31360/60000 (52%)] Loss: 2.340575
Train Epoch: 0 [32000/60000 (53%)] Loss: 2.352647
Train Epoch: 0 [32640/60000 (54%)] Loss: 2.329711
Train Epoch: 0 [33280/60000 (55%)] Loss: 2.331157
Train Epoch: 0 [33920/60000 (57%)] Loss: 2.365947
Train Epoch: 0 [34560/60000 (58%)] Loss: 2.310303
Train Epoch: 0 [35200/60000 (59%)] Loss: 2.403242
Train Epoch: 0 [35840/60000 (60%)] Loss: 2.366431
Train Epoch: 0 [36480/60000 (61%)] Loss: 2.329873
Train Epoch: 0 [37120/60000 (62%)] Loss: 2.329306
Train Epoch: 0 [37760/60000 (63%)] Loss: 2.313803
Train Epoch: 0 [38400/60000 (64%)] Loss: 2.392514
Train Epoch: 0 [39040/60000 (65%)] Loss: 2.323086
Train Epoch: 0 [39680/60000 (66%)] Loss: 2.471573
Train Epoch: 0 [40320/60000 (67%)] Loss: 3.164428
Train Epoch: 0 [40960/60000 (68%)] Loss: 2.648396
Train Epoch: 0 [41600/60000 (69%)] Loss: 2.387139
Train Epoch: 0 [42240/60000 (70%)] Loss: 2.344292
Train Epoch: 0 [42880/60000 (71%)] Loss: 2.378800
Train Epoch: 0 [43520/60000 (72%)] Loss: 2.317856
Train Epoch: 0 [44160/60000 (74%)] Loss: 2.365886
Train Epoch: 0 [44800/60000 (75%)] Loss: 2.275558
Train Epoch: 0 [45440/60000 (76%)] Loss: 2.385407
Train Epoch: 0 [46080/60000 (77%)] Loss: 2.352269
Train Epoch: 0 [46720/60000 (78%)] Loss: 2.331173
Train Epoch: 0 [47360/60000 (79%)] Loss: 2.333803
Train Epoch: 0 [48000/60000 (80%)] Loss: 2.368239
Train Epoch: 0 [48640/60000 (81%)] Loss: 2.323156
Train Epoch: 0 [49280/60000 (82%)] Loss: 2.348527
Train Epoch: 0 [49920/60000 (83%)] Loss: 2.382334
Train Epoch: 0 [50560/60000 (84%)] Loss: 2.492201
Train Epoch: 0 [51200/60000 (85%)] Loss: 2.390026
Train Epoch: 0 [51840/60000 (86%)] Loss: 2.466917
Train Epoch: 0 [52480/60000 (87%)] Loss: 2.296052
Train Epoch: 0 [53120/60000 (88%)] Loss: 2.334232
Train Epoch: 0 [53760/60000 (90%)] Loss: 2.384785
Train Epoch: 0 [54400/60000 (91%)] Loss: 2.349325
Train Epoch: 0 [55040/60000 (92%)] Loss: 2.294627
Train Epoch: 0 [55680/60000 (93%)] Loss: 2.314354
Train Epoch: 0 [56320/60000 (94%)] Loss: 2.313751
Train Epoch: 0 [56960/60000 (95%)] Loss: 2.326008
Train Epoch: 0 [57600/60000 (96%)] Loss: 2.256036
Train Epoch: 0 [58240/60000 (97%)] Loss: 2.343266
Train Epoch: 0 [58880/60000 (98%)] Loss: 2.317806
Train Epoch: 0 [59520/60000 (99%)] Loss: 2.336383
Test set: Average loss: 2.3195, Accuracy: 1135/10000 (11%)
Train Epoch: 1 [0/60000 (0%)] Loss: 2.308809
Train Epoch: 1 [640/60000 (1%)] Loss: 2.314591
Train Epoch: 1 [1280/60000 (2%)] Loss: 2.356146
Train Epoch: 1 [1920/60000 (3%)] Loss: 2.341244
Train Epoch: 1 [2560/60000 (4%)] Loss: 2.321081
Train Epoch: 1 [3200/60000 (5%)] Loss: 2.308159
Train Epoch: 1 [3840/60000 (6%)] Loss: 2.312081
Train Epoch: 1 [4480/60000 (7%)] Loss: 2.357321
Train Epoch: 1 [5120/60000 (9%)] Loss: 2.317735
Train Epoch: 1 [5760/60000 (10%)] Loss: 2.331640
Train Epoch: 1 [6400/60000 (11%)] Loss: 2.342279
Train Epoch: 1 [7040/60000 (12%)] Loss: 2.320646
Train Epoch: 1 [7680/60000 (13%)] Loss: 2.380904
Train Epoch: 1 [8320/60000 (14%)] Loss: 2.305196
Train Epoch: 1 [8960/60000 (15%)] Loss: 2.394487
Train Epoch: 1 [9600/60000 (16%)] Loss: 2.340882
Train Epoch: 1 [10240/60000 (17%)] Loss: 2.319427
Train Epoch: 1 [10880/60000 (18%)] Loss: 2.332401
Train Epoch: 1 [11520/60000 (19%)] Loss: 2.360700
Train Epoch: 1 [12160/60000 (20%)] Loss: 2.303836
Train Epoch: 1 [12800/60000 (21%)] Loss: 2.277171
Train Epoch: 1 [13440/60000 (22%)] Loss: 2.339409
Train Epoch: 1 [14080/60000 (23%)] Loss: 2.283450
Train Epoch: 1 [14720/60000 (25%)] Loss: 2.319676
Train Epoch: 1 [15360/60000 (26%)] Loss: 2.313021
Train Epoch: 1 [16000/60000 (27%)] Loss: 2.321848
Train Epoch: 1 [16640/60000 (28%)] Loss: 2.348344
Train Epoch: 1 [17280/60000 (29%)] Loss: 2.369822
Train Epoch: 1 [17920/60000 (30%)] Loss: 2.285888
Train Epoch: 1 [18560/60000 (31%)] Loss: 2.375418
Train Epoch: 1 [19200/60000 (32%)] Loss: 2.317253
Train Epoch: 1 [19840/60000 (33%)] Loss: 2.306999
Train Epoch: 1 [20480/60000 (34%)] Loss: 2.406102
Train Epoch: 1 [21120/60000 (35%)] Loss: 2.388851
Train Epoch: 1 [21760/60000 (36%)] Loss: 2.360869
Train Epoch: 1 [22400/60000 (37%)] Loss: 2.338306
Train Epoch: 1 [23040/60000 (38%)] Loss: 2.380447
Train Epoch: 1 [23680/60000 (39%)] Loss: 2.347673
Train Epoch: 1 [24320/60000 (41%)] Loss: 2.299994
Train Epoch: 1 [24960/60000 (42%)] Loss: 2.299256
Train Epoch: 1 [25600/60000 (43%)] Loss: 2.298962
Train Epoch: 1 [26240/60000 (44%)] Loss: 2.312130
Train Epoch: 1 [26880/60000 (45%)] Loss: 2.297455
Train Epoch: 1 [27520/60000 (46%)] Loss: 2.376775
Train Epoch: 1 [28160/60000 (47%)] Loss: 2.319507
Train Epoch: 1 [28800/60000 (48%)] Loss: 2.295763
Train Epoch: 1 [29440/60000 (49%)] Loss: 2.335815
Train Epoch: 1 [30080/60000 (50%)] Loss: 2.386428
Train Epoch: 1 [30720/60000 (51%)] Loss: 2.337291
Train Epoch: 1 [31360/60000 (52%)] Loss: 2.318209
Train Epoch: 1 [32000/60000 (53%)] Loss: 2.345703
Train Epoch: 1 [32640/60000 (54%)] Loss: 2.323367
Train Epoch: 1 [33280/60000 (55%)] Loss: 2.324066
Train Epoch: 1 [33920/60000 (57%)] Loss: 2.332751
Train Epoch: 1 [34560/60000 (58%)] Loss: 2.294739
Train Epoch: 1 [35200/60000 (59%)] Loss: 2.370771
Train Epoch: 1 [35840/60000 (60%)] Loss: 2.352789
Train Epoch: 1 [36480/60000 (61%)] Loss: 2.306558
Train Epoch: 1 [37120/60000 (62%)] Loss: 2.318676
Train Epoch: 1 [37760/60000 (63%)] Loss: 2.304147
Train Epoch: 1 [38400/60000 (64%)] Loss: 2.341470
Train Epoch: 1 [39040/60000 (65%)] Loss: 2.317718
Train Epoch: 1 [39680/60000 (66%)] Loss: 2.329848
Train Epoch: 1 [40320/60000 (67%)] Loss: 2.358087
Train Epoch: 1 [40960/60000 (68%)] Loss: 2.280651
Train Epoch: 1 [41600/60000 (69%)] Loss: 2.342746
Train Epoch: 1 [42240/60000 (70%)] Loss: 2.318500
Train Epoch: 1 [42880/60000 (71%)] Loss: 2.359805
Train Epoch: 1 [43520/60000 (72%)] Loss: 2.327282
Train Epoch: 1 [44160/60000 (74%)] Loss: 2.355948
Train Epoch: 1 [44800/60000 (75%)] Loss: 2.277532
Train Epoch: 1 [45440/60000 (76%)] Loss: 2.362204
Train Epoch: 1 [46080/60000 (77%)] Loss: 2.353934
Train Epoch: 1 [46720/60000 (78%)] Loss: 2.374333
Train Epoch: 1 [47360/60000 (79%)] Loss: 2.362021
Train Epoch: 1 [48000/60000 (80%)] Loss: 2.352453
Train Epoch: 1 [48640/60000 (81%)] Loss: 2.329662
Train Epoch: 1 [49280/60000 (82%)] Loss: 2.343026
Train Epoch: 1 [49920/60000 (83%)] Loss: 2.345965
Train Epoch: 1 [50560/60000 (84%)] Loss: 2.438956
Train Epoch: 1 [51200/60000 (85%)] Loss: 2.365172
Train Epoch: 1 [51840/60000 (86%)] Loss: 2.411402
Train Epoch: 1 [52480/60000 (87%)] Loss: 2.321453
Train Epoch: 1 [53120/60000 (88%)] Loss: 2.325870
Train Epoch: 1 [53760/60000 (90%)] Loss: 2.372273
Train Epoch: 1 [54400/60000 (91%)] Loss: 2.340661
Train Epoch: 1 [55040/60000 (92%)] Loss: 2.308806
Train Epoch: 1 [55680/60000 (93%)] Loss: 2.310355
Train Epoch: 1 [56320/60000 (94%)] Loss: 2.310350
Train Epoch: 1 [56960/60000 (95%)] Loss: 2.312412
Train Epoch: 1 [57600/60000 (96%)] Loss: 2.227252
Train Epoch: 1 [58240/60000 (97%)] Loss: 2.308315
Train Epoch: 1 [58880/60000 (98%)] Loss: 2.317189
Train Epoch: 1 [59520/60000 (99%)] Loss: 2.317034
Test set: Average loss: 2.3126, Accuracy: 1135/10000 (11%)
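
Both test epochs end at 1135/10000 (11%) accuracy with an average loss near 2.31, which is chance level: the negative log-likelihood of a uniform guess over 10 classes is ln 10 ≈ 2.3026. The run never learns, plausibly because the default --lr of 1.0 is far too large for the optimizer in use (the optimizer line is not visible in this diff). A quick check of the arithmetic:

    import math

    print(math.log(10))  # 2.302585..., matches the loss plateau around 2.31
    print(1135 / 10000)  # 0.1135, roughly chance accuracy for 10 classes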