mlp.py
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset
import pandas as pd
# Load the data (raw string avoids backslash escapes in the Windows path)
filepath = r'E:\GranuBeaker\savedata\gb_particledata'
df = pd.read_feather(filepath, columns=None, use_threads=True)
df = df.dropna(axis=0)  # Drop rows with missing values
X = df.drop(['no_particles', 'packing_fraction'], axis=1).to_numpy()
y = df['packing_fraction'].to_numpy()
# Standardize the features
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Convert to PyTorch tensors
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32).view(-1, 1) # Reshape y to be a column vector
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Create DataLoader for batching
train_data = TensorDataset(X_train, y_train)
test_data = TensorDataset(X_test, y_test)
train_loader = DataLoader(train_data, batch_size=16, shuffle=True)
test_loader = DataLoader(test_data, batch_size=16)
# Define the MLP model for regression
class MLP(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)  # Output one value for regression

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out
# Define the model
input_size = X.shape[1]
hidden_size = 64
model = MLP(input_size, hidden_size)
# Loss and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
num_epochs = 1000
for epoch in range(num_epochs):
    for batch in train_loader:
        inputs, labels = batch
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Evaluate the model
model.eval()  # Set the model to evaluation mode
with torch.no_grad():
    total_loss = 0
    for batch in test_loader:
        inputs, labels = batch
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        total_loss += loss.item()
    avg_loss = total_loss / len(test_loader)
print(f'Test Loss: {avg_loss:.4f}')
# Save the model
torch.save(model.state_dict(), 'mlp_model.pth')
print("Model saved to mlp_model.pth")