Add Monotonic function toy example
AWehenkel committed Nov 21, 2019
1 parent ab21b2d commit d7ffa74
Showing 2 changed files with 71 additions and 20 deletions.
MonotonicMLP.py (74 changes: 56 additions & 18 deletions)
@@ -1,26 +1,47 @@
 import torch
 import argparse
 import torch.nn as nn
+import matplotlib.pyplot as plt
 from models.UMNN import MonotonicNN, IntegrandNN
 
+def f(x_1, x_2, x_3):
+    return .001*(x_1**3 + x_1) + x_2 ** 2 + torch.sin(x_3)
+
 def create_dataset(n_samples):
     x = torch.randn(n_samples, 3)
-    y = x[:, 0]**3 + x[:, 1]**2 + torch.sin(x[:, 2])
+    y = f(x[:, 0], x[:, 1], x[:, 2])
     return x, y
 
+class MLP(nn.Module):
+    def __init__(self, in_d, hidden_layers):
+        super(MLP, self).__init__()
+        self.net = []
+        hs = [in_d] + hidden_layers + [1]
+        for h0, h1 in zip(hs, hs[1:]):
+            self.net.extend([
+                nn.Linear(h0, h1),
+                nn.ReLU(),
+            ])
+        self.net.pop() # pop the last ReLU for the output layer
+        self.net = nn.Sequential(*self.net)
+
+    def forward(self, x, h):
+        return self.net(torch.cat((x, h), 1))
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='')
-    parser.add_argument("-nb_train", default=10000, type=int, help="Number of training samples")
+    parser.add_argument("-nb_train", default=20000, type=int, help="Number of training samples")
     parser.add_argument("-nb_test", default=1000, type=int, help="Number of testing samples")
     parser.add_argument("-nb_epoch", default=100, type=int, help="Number of training epochs")
     parser.add_argument("-load", default=False, action="store_true", help="Load a model ?")
     parser.add_argument("-folder", default="", help="Folder")
     args = parser.parse_args()
 
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
-    model = MonotonicNN(3, [100, 100, 100], nb_steps=20, dev=device).to(device)
-    optim = torch.optim.Adam(model.parameters(), 1e-3, weight_decay=1e-5)
+    model_monotonic = MonotonicNN(3, [100, 100, 100], nb_steps=50, dev=device).to(device)
+    model_mlp = MLP(3, [200, 200, 200]).to(device)
+    optim_monotonic = torch.optim.Adam(model_monotonic.parameters(), 1e-3, weight_decay=1e-5)
+    optim_mlp = torch.optim.Adam(model_mlp.parameters(), 1e-3, weight_decay=1e-5)
 
     train_x, train_y = create_dataset(args.nb_train)
     test_x, test_y = create_dataset(args.nb_test)
@@ -31,25 +52,42 @@ def create_dataset(n_samples):
         idx = torch.randperm(args.nb_train)
         train_x = train_x[idx].to(device)
         train_y = train_y[idx].to(device)
-        avg_loss = 0.
+        avg_loss_mon = 0.
+        avg_loss_mlp = 0.
         for i in range(0, args.nb_train-b_size, b_size):
+            # Monotonic
            x = train_x[i:i + b_size].requires_grad_()
            y = train_y[i:i + b_size].requires_grad_()
-            y_pred = model(x[:, [0]], x[:, 1:])[:, 0]
+            y_pred = model_monotonic(x[:, [0]], x[:, 1:])[:, 0]
             loss = ((y_pred - y)**2).sum()
-            optim.zero_grad()
+            optim_monotonic.zero_grad()
             loss.backward()
-            optim.step()
-            avg_loss += loss.item()
-        print("train:", epoch, avg_loss / (i + b_size))
-        avg_loss = 0.
-        for i in range(0, args.nb_test-b_size, b_size):
-            x = test_x[i:i + b_size]
-            y = test_y[i:i + b_size]
-            y_pred = model(x[:, [0]], x[:, 1:])[:, 0]
-            loss = ((y_pred - y)**2).sum()
-            avg_loss += loss.item()
-        print("test:", epoch, avg_loss / (i + b_size))
+            optim_monotonic.step()
+            avg_loss_mon += loss.item()
+            # MLP
+            y_pred = model_mlp(x[:, [0]], x[:, 1:])[:, 0]
+            loss = ((y_pred - y) ** 2).sum()
+            optim_mlp.zero_grad()
+            loss.backward()
+            optim_mlp.step()
+            avg_loss_mlp += loss.item()
+
+        print(epoch)
+        print("\tMLP: ", avg_loss_mlp/args.nb_train)
+        print("\tMonotonic: ", avg_loss_mon / args.nb_train)
+
+    # <<TEST>>
+    x = torch.arange(-5, 5, .1).unsqueeze(1)
+    h = torch.zeros(x.shape[0], 2)
+    y = f(x[:, 0], h[:, 0], h[:, 1]).numpy()
+    y_mon = model_monotonic(x, h)[:, 0]
+    y_mlp = model_mlp(x, h)[:, 0]
+    plt.plot(x.detach().numpy(), y_mon.detach().numpy(), label="Monotonic model")
+    plt.plot(x.detach().numpy(), y_mlp.detach().numpy(), label="MLP model")
+    plt.plot(x.numpy(), y, label="groundtruth")
+    plt.legend()
+    plt.show()
+    plt.savefig("Monotonicity.png")



models/UMNN/MonotonicNN.py (17 changes: 15 additions & 2 deletions)
@@ -1,6 +1,7 @@
 import torch
 import torch.nn as nn
 from .NeuralIntegral import NeuralIntegral
+from .ParallelNeuralIntegral import ParallelNeuralIntegral
 
 
 def _flatten(sequence):
@@ -29,7 +30,16 @@ class MonotonicNN(nn.Module):
     def __init__(self, in_d, hidden_layers, nb_steps=50, dev="cpu"):
         super(MonotonicNN, self).__init__()
         self.integrand = IntegrandNN(in_d, hidden_layers)
-        self.offset = nn.Parameter(torch.tensor(.0))
+        self.net = []
+        hs = [in_d-1] + hidden_layers + [2]
+        for h0, h1 in zip(hs, hs[1:]):
+            self.net.extend([
+                nn.Linear(h0, h1),
+                nn.ReLU(),
+            ])
+        self.net.pop() # pop the last ReLU for the output layer
+        # It will output the scaling and offset factors.
+        self.net = nn.Sequential(*self.net)
         self.device = dev
         self.nb_steps = nb_steps

@@ -38,4 +48,7 @@ def __init__(self, in_d, hidden_layers, nb_steps=50, dev="cpu"):
     '''
     def forward(self, x, h):
         x0 = torch.zeros(x.shape).to(self.device)
-        return NeuralIntegral.apply(x0, x, self.integrand, _flatten(self.integrand.parameters()), h, self.nb_steps) + self.offset
+        out = self.net(h)
+        offset = out[:, [0]]
+        scaling = out[:, [1]]
+        return scaling*ParallelNeuralIntegral.apply(x0, x, self.integrand, _flatten(self.integrand.parameters()), h, self.nb_steps) + offset
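In words, the new forward pass computes F(x, h) = s(h) * (integral of g(t, h) dt from 0 to x) + b(h): g is the positive integrand network, the integration is delegated to ParallelNeuralIntegral, and the scaling s(h) and offset b(h) are the two outputs of the small conditioner net added in __init__, which replaces the single learned scalar offset. Note that s(h) is read straight from a linear output here, so it is not forced to be positive. The sketch below only illustrates that structure and is not the repository's implementation: it swaps the quadrature for a crude midpoint rule and uses its own stand-in for IntegrandNN.

# Illustrative stand-in (assumed structure, not the UMNN code): same
# (x, h) -> s(h) * integral_0^x g(t, h) dt + b(h) shape as in the diff above.
import torch
import torch.nn as nn

class ToyMonotonicNN(nn.Module):
    def __init__(self, in_d, hidden=64, nb_steps=50):
        super().__init__()
        # Positive integrand g(t, h): positivity is what makes the integral
        # non-decreasing in x (the repo's IntegrandNN enforces this in its own way).
        self.integrand = nn.Sequential(
            nn.Linear(in_d, hidden), nn.ReLU(),
            nn.Linear(hidden, 1), nn.Softplus(),
        )
        # Conditioner on h only: outputs [offset b(h), scaling s(h)].
        self.conditioner = nn.Sequential(
            nn.Linear(in_d - 1, hidden), nn.ReLU(),
            nn.Linear(hidden, 2),
        )
        self.nb_steps = nb_steps

    def forward(self, x, h):
        # Midpoint-rule quadrature of g over [0, x]; the real model uses
        # ParallelNeuralIntegral for this step.
        steps = torch.linspace(0.5 / self.nb_steps, 1. - 0.5 / self.nb_steps,
                               self.nb_steps, device=x.device)
        t = x * steps                                        # (batch, nb_steps) sample points
        h_rep = h.unsqueeze(1).expand(-1, self.nb_steps, -1)
        g = self.integrand(torch.cat((t.unsqueeze(-1), h_rep), -1)).squeeze(-1)
        integral = (x / self.nb_steps) * g.sum(1, keepdim=True)
        out = self.conditioner(h)
        offset, scaling = out[:, [0]], out[:, [1]]           # unconstrained, as in the diff
        return scaling * integral + offset

ToyMonotonicNN(3) follows the same (x, h) calling convention as MonotonicNN in the diff, so it could be swapped into the toy script for comparison; its only purpose is to make the scaling/offset structure explicit.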
