GPyTorch

1.15.2 · active · verified Sun Apr 12

GPyTorch is a Gaussian Process (GP) library built on PyTorch, designed for scalable, flexible, and modular GP models. It leverages PyTorch's capabilities for GPU acceleration and automatic differentiation, making it suitable for modern machine learning workflows. GPyTorch frequently releases maintenance updates and new features, with major versions aligning with PyTorch releases.

Warnings

Install

Imports

Quickstart

This quickstart demonstrates simple exact Gaussian process regression: it defines a GP model and a Gaussian likelihood, trains the model by minimizing the negative exact marginal log likelihood, and then makes predictions with confidence intervals.

import math
import torch
import gpytorch
from gpytorch.models import ExactGP
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.means import ConstantMean
from gpytorch.kernels import ScaleKernel, RBFKernel
from gpytorch.distributions import MultivariateNormal
from torch.optim import Adam

# 1. Training data: 100 evenly spaced inputs on [0, 1]; targets are one
# period of a sine wave corrupted by Gaussian noise with variance 0.04
# (i.e. standard deviation sqrt(0.04) = 0.2).
train_x = torch.linspace(0, 1, 100)
noise_std = math.sqrt(0.04)
train_y = torch.sin(2 * math.pi * train_x) + noise_std * torch.randn(train_x.size())

# 2. Define the GP model
# 2. Define the GP model
class ExactGPModel(ExactGP):
    """Exact GP regression model with a constant mean and a scaled RBF kernel."""

    def __init__(self, train_x, train_y, likelihood):
        # ExactGP's constructor takes the training data and the likelihood,
        # which it needs for exact (closed-form) posterior inference.
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = ConstantMean()
        self.covar_module = ScaleKernel(RBFKernel())

    def forward(self, x):
        """Return the latent-function prior at inputs ``x`` as a MultivariateNormal."""
        return MultivariateNormal(self.mean_module(x), self.covar_module(x))

# Instantiate the Gaussian likelihood and the GP model around the training data
likelihood = GaussianLikelihood()
model = ExactGPModel(train_x, train_y, likelihood)

# 3. Fit the hyperparameters
# Switch both modules into training mode
model.train()
likelihood.train()

# Adam over model.parameters()
# NOTE(review): this presumably also covers the likelihood's noise parameter,
# since ExactGP registers the likelihood as a submodule — confirm against the
# GPyTorch docs if the likelihood is swapped for one constructed separately.
optimizer = Adam(model.parameters(), lr=0.1)

# Training objective: the exact marginal log likelihood (negated below,
# because optimizers minimize)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

n_iterations = 50  # typical for this toy problem
for _ in range(n_iterations):
    optimizer.zero_grad()          # clear gradients accumulated last step
    output = model(train_x)        # latent distribution at the training inputs
    loss = -mll(output, train_y)   # maximize mll == minimize its negation
    loss.backward()
    optimizer.step()

# 4. Posterior prediction
# Evaluation mode: the model now returns posterior rather than prior outputs
model.eval()
likelihood.eval()

# A denser grid of test inputs spanning the training interval; built outside
# the no_grad block since linspace carries no gradient state anyway
test_x = torch.linspace(0, 1, 51)

# no_grad: inference only, skip autograd bookkeeping.
# fast_pred_var: GPyTorch's fast predictive-variance approximation.
with torch.no_grad(), gpytorch.settings.fast_pred_var():
    # Passing the model output through the likelihood yields the predictive
    # distribution over observations (i.e. including observation noise)
    observed_pred = likelihood(model(test_x))
    mean = observed_pred.mean
    # confidence_region() — per GPyTorch docs, roughly +/- 2 standard deviations
    lower, upper = observed_pred.confidence_region()

view raw JSON →