Commit

update task2
donnadamus committed Nov 18, 2024
1 parent cbd5c08 commit dd08ffc
Showing 7 changed files with 138 additions and 43 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,5 +1,6 @@
# Virtual environment
/env
+/data
/data/train
/data/valid
/data/test
Binary file added src/eval/__pycache__/task2_utils.cpython-310.pyc
16 changes: 4 additions & 12 deletions src/eval/task2_eval.py
@@ -1,20 +1,12 @@
### EVALUATION CODE

"""
!gdown --id 13R5JajJ4oh_enkRjfs2fThITGsIMlFX8
!unzip /content/test_set.zip -d /content
"""
import torch
import torch.nn as nn
from timm import create_model
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import os
from PIL import Image
-from utils.task2_utils import *
+from task2_utils import *


# Transforms
@@ -25,7 +17,7 @@
])

# Dataset and DataLoader
root_dir = "/content/test_set" # Replace with your dataset path
root_dir = "/Users/marcodonnarumma/Desktop/BottleAROL/data/test_set" # Replace with your dataset path
dataset = BottleRotationDataset(root_dir=root_dir, transform=transform)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

@@ -39,13 +31,13 @@
from PIL import Image, ImageDraw, ImageFont

# Ensure the output directory exists
output_dir = "/content/predictions"
output_dir = "/Users/marcodonnarumma/Desktop/BottleAROL/data/predictions_task2"
os.makedirs(output_dir, exist_ok=True)

# Function to make predictions, save images with labels, and return predictions and targets
def predict_and_save_images(model, dataloader, output_dir):
model.eval() # Set the model to evaluation mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("mps" if torch.cuda.is_available() else "cpu")
model.to(device)

all_predictions = [] # Store all predictions
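
Since the labels are raw angles in [0, 360], evaluation has a wraparound subtlety: a prediction of 359 for a target of 1 is off by 2 degrees, not 358. A minimal, illustrative sketch of an angle-aware error metric (not part of this commit; the function name is hypothetical):

def angular_error(pred_deg, target_deg):
    # Smallest difference on the circle, in degrees.
    diff = torch.abs(pred_deg - target_deg) % 360.0
    return torch.minimum(diff, 360.0 - diff)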
104 changes: 104 additions & 0 deletions src/eval/task2_utils.py
@@ -0,0 +1,104 @@
import torch
import torch.nn as nn
from timm import create_model
from torch.utils.data import DataLoader, Dataset
import torchvision.transforms as transforms
import os
from PIL import Image
import pytorch_lightning as pl


# Define the dataset
class BottleRotationDataset(Dataset):
def __init__(self, root_dir, transform=None):
self.image_paths = []
self.labels = []
self.transform = transform
for degree_folder in os.listdir(root_dir):
folder_path = os.path.join(root_dir, degree_folder)
if os.path.isdir(folder_path):
degree = float(degree_folder)
for img_name in os.listdir(folder_path):
self.image_paths.append(os.path.join(folder_path, img_name))
self.labels.append(degree)

def __len__(self):
return len(self.image_paths)

def __getitem__(self, idx):
image = Image.open(self.image_paths[idx]).convert('RGB')
label = self.labels[idx]
if self.transform:
image = self.transform(image)
return image, torch.tensor(label, dtype=torch.float32)
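
# Note: BottleRotationDataset expects root_dir/<degree>/<image> (e.g. test_set/45.0/img_0.png);
# each subfolder name must parse with float() and becomes the rotation label in degrees.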

# Define the custom PyTorch Lightning model
class ViTRegression(pl.LightningModule):
def __init__(self, learning_rate=1e-4):
super().__init__()
self.save_hyperparameters()
self.vit = create_model('vit_tiny_patch16_224', pretrained=True)
num_features = self.vit.head.in_features
self.vit.head = nn.Identity() # Remove classification head
self.regressor = nn.Linear(num_features, 1) # Add regression head
self.criterion = nn.MSELoss()

def forward(self, x):
x = self.vit(x)
x = self.regressor(x)
return x

def training_step(self, batch, batch_idx):
images, targets = batch # Targets are now in the range [0, 360]
outputs = self(images).squeeze() # Predictions from the model
loss = self.criterion(outputs, targets) # Compute loss without normalization
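        # Caveat: plain MSE treats 0 and 360 degrees as maximally far apart even though
        # they describe the same rotation; regressing sin/cos of the angle is a common
        # wraparound-safe alternative.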

# Log the learning rate
current_lr = self.optimizers().param_groups[0]['lr']
self.log('learning_rate', current_lr, on_step=False, on_epoch=True)

# Log only at the end of the epoch
self.log('train_loss', loss, on_step=False, on_epoch=True)

return loss

def configure_optimizers(self):
# Define default values
default_learning_rate = 1e-4
default_scheduler_patience = 5
default_scheduler_factor = 0.1
default_scheduler_threshold = 100 # Adjust to match your loss scale
default_min_lr = 1e-6

# Retrieve hyperparameters or use defaults
learning_rate = self.hparams.get('learning_rate', default_learning_rate)
scheduler_patience = self.hparams.get('scheduler_patience', default_scheduler_patience)
scheduler_factor = self.hparams.get('scheduler_factor', default_scheduler_factor)
scheduler_threshold = self.hparams.get('scheduler_threshold', default_scheduler_threshold)
min_lr = self.hparams.get('min_lr', default_min_lr)

# Define optimizer
optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)

# Define scheduler
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode='min', # Minimize the monitored metric
factor=scheduler_factor, # Reduce learning rate by this factor
patience=scheduler_patience, # Wait this many epochs with no improvement
threshold=scheduler_threshold, # Minimum improvement to consider
threshold_mode='abs', # Use absolute threshold for large-scale losses
cooldown=0, # No cooldown period after reduction
min_lr=min_lr # Minimum learning rate allowed
)

# Return optimizer and scheduler
return {
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'monitor': 'train_loss', # Metric to monitor for learning rate adjustment
'interval': 'epoch', # Check at the end of every epoch
'frequency': 1 # Frequency of scheduler updates
}
}
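
For orientation, a minimal sketch of how this module is driven from the training script below, assuming the dataset, dataloader, and transform it is paired with (the trainer arguments are illustrative, not the committed configuration):

model = ViTRegression(learning_rate=1e-4)
trainer = pl.Trainer(max_epochs=100)
trainer.fit(model, train_dataloaders=dataloader)  # the per-epoch 'train_loss' log drives ReduceLROnPlateau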
30 changes: 29 additions & 1 deletion src/training/task2_training.py
@@ -17,10 +17,38 @@
import torchvision.transforms as transforms
import os
from PIL import Image
-from utils.task2_utils import *
+from task2_utils import *


# Dataset and DataLoader
root_dir = "/content/dataset_label" # Replace with your dataset path
dataset = BottleRotationDataset(root_dir=root_dir, transform=transform)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Lightning Trainer with Model Checkpoint Callback
checkpoint_callback = pl.callbacks.ModelCheckpoint(
dirpath="checkpoints/",
filename="vit_tiny_rotation_epoch_{epoch}",
save_top_k=-1, # Save all checkpoints
every_n_epochs=5, # Save every 5 epochs
)

from pytorch_lightning.callbacks import ProgressBar

# Custom ProgressBar to ensure logging at the end of the epoch
class CustomProgressBar(ProgressBar):
def __init__(self):
super().__init__()

    def on_train_epoch_end(self, trainer, pl_module):
        train_loss = trainer.callback_metrics.get("train_loss", None)
        learning_rate = trainer.callback_metrics.get("learning_rate", None)
        if train_loss is not None:
            print(f"Epoch {trainer.current_epoch + 1}: Train Loss = {train_loss:.4f}")

        if learning_rate is not None:
            print(f"Epoch {trainer.current_epoch + 1}: Learning Rate = {learning_rate:.6f}")

# Trainer configuration
trainer = pl.Trainer(
max_epochs=100,
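# The remaining Trainer arguments are collapsed in this view. A plausible
# completion, strictly as a sketch (only the callback names come from the code above):
#     callbacks=[checkpoint_callback, CustomProgressBar()],
# )
# trainer.fit(model, train_dataloaders=dataloader)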
29 changes: 0 additions & 29 deletions src/utils/task2_utils.py → src/training/task2_utils.py
@@ -109,32 +109,3 @@ def configure_optimizers(self):
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

# Dataset and DataLoader
root_dir = "/content/dataset_label" # Replace with your dataset path
dataset = BottleRotationDataset(root_dir=root_dir, transform=transform)
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Lightning Trainer with Model Checkpoint Callback
checkpoint_callback = pl.callbacks.ModelCheckpoint(
dirpath="checkpoints/",
filename="vit_tiny_rotation_epoch_{epoch}",
save_top_k=-1, # Save all checkpoints
every_n_epochs=5, # Save every 5 epochs
)

from pytorch_lightning.callbacks import ProgressBar

# Custom ProgressBar to ensure logging at the end of the epoch
class CustomProgressBar(ProgressBar):
def __init__(self):
super().__init__()

def on_train_epoch_end(self, trainer, pl_module):
train_loss = trainer.callback_metrics.get("train_loss", None)
learning_rate = trainer.callback_metrics.get("learning_rate", None)
if train_loss:
print(f"Epoch {trainer.current_epoch + 1}: Train Loss = {train_loss:.4f}")

if learning_rate:
print(f"Epoch {trainer.current_epoch + 1}: Learning Rate: {learning_rate:.6f}")
1 change: 0 additions & 1 deletion src/utils/README.md

This file was deleted.
