GPyTorch integration - variational GP #122

Open · wants to merge 2 commits into master
2 changes: 1 addition & 1 deletion pinot/app/experiment.py
@@ -212,7 +212,7 @@ def _batch(x):
                results[metric.__name__][state_name] = (
                    metric(
                        self.net,
-                       g,
+                       pinot.metrics._independent(self.net.condition(g)),
                        y,
                        *_args,
                        sampler=self.sampler,
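With this change each metric receives the predictive distribution returned by self.net.condition(g), wrapped by pinot.metrics._independent, instead of the raw graph batch g. The implementation of _independent is not shown in this diff; a hedged sketch of what such a wrapper could look like, assuming it reduces a correlated multivariate predictive to independent per-sample marginals (the name and behavior are assumptions for illustration only, not the repository's actual code):

```python
import torch

def _independent(distribution):
    # Hypothetical sketch: collapse a (possibly correlated) MultivariateNormal
    # predictive into a Normal with independent per-sample marginals, so that
    # per-point metrics (e.g. RMSE, log-likelihood) can be evaluated element-wise.
    return torch.distributions.Normal(
        loc=distribution.mean.flatten(),
        scale=distribution.variance.pow(0.5).flatten(),
    )
```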
1 change: 0 additions & 1 deletion pinot/net.py
@@ -241,7 +241,6 @@ def condition(self, g, *args, **kwargs):
        h_last = self.representation(self.g_last)
        kwargs = {**{"x_tr": h_last, "y_tr": self.y_last}, **kwargs}

-
        if sampler is not None and hasattr(sampler, 'sample_params'):
            sampler.sample_params()
4 changes: 3 additions & 1 deletion pinot/regressors/__init__.py
@@ -7,4 +7,6 @@
from pinot.regressors.neural_network_regressor import *
from pinot.regressors.gaussian_process_regressor import *
-from pinot.regressors.biophysical_regressor import BiophysicalRegressor
+from pinot.regressors.biophysical_regressor import BiophysicalRegressor
+
+from pinot.regressors.gpytorch_regressor import VariationalGP
81 changes: 81 additions & 0 deletions pinot/regressors/gpytorch_regressor.py
@@ -0,0 +1,81 @@
import torch
import math
import gpytorch
from gpytorch.variational import CholeskyVariationalDistribution
from gpytorch.variational import VariationalStrategy
from gpytorch.models import ApproximateGP


class VariationalGP(ApproximateGP):
    def __init__(self, in_features, num_data, inducing_points=None, num_inducing_points=100,
                 mean=None, covar=None, beta=1.0):
        """
        :param in_features: dimension of the input to the GP layer
        :param num_data: total number of training samples
        :param inducing_points: inducing points; the second dimension should equal in_features
        :param num_inducing_points: number of inducing points; used to randomly initialize
            inducing points if inducing_points is not given
        :param mean: gpytorch.means.Mean
        :param covar: gpytorch.kernels.Kernel
        :param beta: relative weight of the KL term in the ELBO
        """
        if inducing_points is None:
            # Randomly initialize inducing points
            inducing_points = torch.rand(num_inducing_points, in_features)

        variational_distribution = CholeskyVariationalDistribution(
            inducing_points.size(-2)
        )

        # Use LMCVariationalStrategy instead to introduce correlation among tasks for a multi-task GP
        variational_strategy = VariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super(VariationalGP, self).__init__(variational_strategy)
        self.mean_module = gpytorch.means.LinearMean(in_features) if mean is None else mean
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(
                lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(
                    math.exp(-1), math.exp(1), sigma=0.1, transform=torch.exp
                )
            )
        ) if covar is None else covar
        self.num_inducing = inducing_points.size(-2)
        self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
        self.num_data = num_data
        self.beta = beta

    def forward(self, h):
        """ Computes the GP prior.

        :param h: learned features of graphs, shape (n, in_features)
        :return: gpytorch.distributions.MultivariateNormal prior over f at h
        """
        mean_x = self.mean_module(h)
        covar_x = self.covar_module(h)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

    def loss(self, h, y, *args, **kwargs):
        """ Computes the negative ELBO.

        :param h: learned features of graphs, shape (n, in_features)
        :param y: labels of graphs, shape (n,)
        :return: scalar, the negative evidence lower bound
        """
        approximate_dist_f = self(h, *args, **kwargs)
        num_batch = approximate_dist_f.event_shape[0]
        # Compute the log-likelihood and the KL divergence, following the same steps as in
        # forward() of _ApproximateMarginalLogLikelihood
        log_likelihood = self.likelihood.expected_log_prob(y, approximate_dist_f, **kwargs).sum(-1).div(num_batch)
        kl_divergence = self.variational_strategy.kl_divergence().div(self.num_data).mul(self.beta)
        elbo = log_likelihood - kl_divergence
        return -(elbo.sum())

    def condition(self, x, *args, **kwargs):
        """ Computes the predictive distribution over y* given x*.

        :param x: learned features of graphs, shape (n, in_features)
        :return: the predictive distribution over y*, i.e. the likelihood applied to the latent posterior
        """
        dist_f = self(x, *args, **kwargs)
        dist_y = self.likelihood(dist_f)
        return dist_y
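As a rough guide to how the new regressor is meant to be used: loss() returns the negative ELBO, -(E_q[log p(y|f)] / batch_size - beta * KL(q(u) || p(u)) / num_data), and condition() returns the likelihood applied to the approximate posterior over f. A minimal usage sketch on precomputed graph features follows; the feature dimension, synthetic data, optimizer, and training-loop settings are assumptions for illustration, not part of this pull request:

```python
import torch
from pinot.regressors.gpytorch_regressor import VariationalGP

# Assumed toy setup: 128-dimensional graph features for 500 training molecules.
h_tr = torch.randn(500, 128)
y_tr = torch.randn(500)

gp = VariationalGP(in_features=128, num_data=h_tr.size(0), num_inducing_points=64)
optimizer = torch.optim.Adam(gp.parameters(), lr=1e-2)

for _ in range(100):
    optimizer.zero_grad()
    loss = gp.loss(h_tr, y_tr)   # negative ELBO
    loss.backward()
    optimizer.step()

# Predictive distribution over y* for new features.
gp.eval()
with torch.no_grad():
    dist_y = gp.condition(torch.randn(10, 128))
    mean, var = dist_y.mean, dist_y.variance
```

In pinot itself the features would come from the graph representation layer rather than torch.randn, i.e. the GP head consumes something like self.representation(g), as in the net.py hunk above.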
1 change: 1 addition & 0 deletions requirements.txt
@@ -3,3 +3,4 @@ torch
matplotlib
pandas
numpy
+gpytorch
188 changes: 77 additions & 111 deletions scripts/gp/gp_playground_gpytorch.ipynb

Large diffs are not rendered by default.
