# Copyright 2016 James Hensman, Valentine Svensson, alexggmatthews, Mark van der Wilk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Union

import numpy as np
import tensorflow as tf

from .. import kullback_leiblers
from ..conditionals import conditional
from ..covariances import Kuu
from .model import GPModel, Data, GPPosterior
from ..base import Parameter, positive, triangular
from ..config import default_float, default_jitter
from .util import inducingpoint_wrapper


class SVGP(GPModel):
    """
    This is the Sparse Variational GP (SVGP). The key reference is

    ::

        @inproceedings{hensman2014scalable,
            title={Scalable Variational Gaussian Process Classification},
            author={Hensman, James and Matthews, Alexander G. de G. and
                    Ghahramani, Zoubin},
            booktitle={Proceedings of AISTATS},
            year={2015}
        }
    """

    def __init__(self,
                 kernel,
                 likelihood,
                 inducing_variable,
                 mean_function=None,
                 *,
                 num_latent: int = 1,
                 q_diag: bool = False,
                 q_mu=None,
                 q_sqrt=None,
                 whiten: bool = True,
                 num_data=None):
        """
        - kernel, likelihood, inducing_variable, mean_function are appropriate
          GPflow objects
        - num_latent is the number of latent processes to use; it defaults to 1
        - q_diag is a boolean. If True, the covariance is approximated by a
          diagonal matrix.
        - whiten is a boolean. If True, we use the whitened representation of
          the inducing points.
        - num_data is the total number of observations; it defaults to X.shape[0]
          (relevant when feeding in external minibatches)
        """
        # init the super class, accept args
        super().__init__(kernel, likelihood, mean_function)
        self.num_data = num_data
        self.q_diag = q_diag
        self.whiten = whiten
        self.inducing_variable = inducingpoint_wrapper(inducing_variable)
        self.num_latent = num_latent

        # init variational parameters
        num_inducing = len(self.inducing_variable)
        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)

    def _init_variational_parameters(self, num_inducing, q_mu, q_sqrt, q_diag):
        """
        Constructs the mean and Cholesky of the covariance of the variational
        Gaussian posterior. If a user passes values for `q_mu` and `q_sqrt`,
        the routine checks that they have consistent and correct shapes. If a
        user does not specify any values for `q_mu` and `q_sqrt`, the routine
        initializes them; their shape depends on `num_inducing` and `q_diag`.

        Note: most often the comments refer to the number of observations
        (= output dimensions) with P, the number of latent GPs with L, and the
        number of inducing points with M. Typically P equals L, but when
        certain multioutput kernels are used, this can change.

        Parameters
        ----------
        :param num_inducing: int
            Number of inducing variables, typically referred to as M.
        :param q_mu: np.array or None
            Mean of the variational Gaussian posterior. If None, the function
            will initialise the mean with zeros. If not None, the shape of
            `q_mu` is checked.
        :param q_sqrt: np.array or None
            Cholesky of the covariance of the variational Gaussian posterior.
            If None, the function will initialise `q_sqrt` with the identity
            matrix. If not None, the shape of `q_sqrt` is checked, depending
            on `q_diag`.
        :param q_diag: bool
            Used to check if `q_mu` and `q_sqrt` have the correct shape or to
            construct them with the correct shape. If `q_diag` is True,
            `q_sqrt` is two dimensional and only holds the square root of the
            covariance diagonal elements. If False, `q_sqrt` is three
            dimensional.
        """
        q_mu = np.zeros((num_inducing, self.num_latent)) if q_mu is None else q_mu
        self.q_mu = Parameter(q_mu, dtype=default_float())  # [M, P]

        if q_sqrt is None:
            if self.q_diag:
                ones = np.ones((num_inducing, self.num_latent), dtype=default_float())
                self.q_sqrt = Parameter(ones, transform=positive())  # [M, P]
            else:
                q_sqrt = [
                    np.eye(num_inducing, dtype=default_float())
                    for _ in range(self.num_latent)
                ]
                q_sqrt = np.array(q_sqrt)
                self.q_sqrt = Parameter(q_sqrt, transform=triangular())  # [P, M, M]
        else:
            if q_diag:
                assert q_sqrt.ndim == 2
                self.num_latent = q_sqrt.shape[1]
                self.q_sqrt = Parameter(q_sqrt, transform=positive())  # [M, L|P]
            else:
                assert q_sqrt.ndim == 3
                self.num_latent = q_sqrt.shape[0]
                num_inducing = q_sqrt.shape[1]
                self.q_sqrt = Parameter(q_sqrt, transform=triangular())  # [L|P, M, M]

    def prior_kl(self):
        return kullback_leiblers.prior_kl(self.inducing_variable,
                                          self.kernel,
                                          self.q_mu,
                                          self.q_sqrt,
                                          whiten=self.whiten)

    def elbo(self, data: Data) -> tf.Tensor:
        """
        This gives a variational bound on the model likelihood.
        """
        X, Y = data
        kl = self.prior_kl()
        f_mean, f_var = conditional(X,
                                    self.inducing_variable,
                                    self.kernel,
                                    self.q_mu,
                                    q_sqrt=self.q_sqrt,
                                    full_cov=False,
                                    white=self.whiten,
                                    full_output_cov=False)
        f_mean += self.mean_function(X)
        var_exp = self.likelihood.variational_expectations(f_mean, f_var, Y)
        if self.num_data is not None:
            # Scale the minibatch expectation up to the size of the full dataset.
            num_data = tf.cast(self.num_data, kl.dtype)
            minibatch_size = tf.cast(X.shape[0], kl.dtype)
            scale = num_data / minibatch_size
        else:
            scale = tf.cast(1.0, kl.dtype)
        return tf.reduce_sum(var_exp) * scale - kl

    def objective(self, data: Data) -> tf.Tensor:
        """
        This returns the negative evidence lower bound (ELBO) on the log
        marginal likelihood, suitable for minimisation.
        """
        return -self.elbo(data)

    def get_posterior(self, data: Data) -> GPPosterior:
        """
        Returns a GPPosterior built from the model's kernel, likelihood,
        inducing variables and fitted variational parameters.
        """
        return GPPosterior(mean_function=self.mean_function,
                           kernel=self.kernel,
                           likelihood=self.likelihood,
                           inducing_variable=self.inducing_variable,
                           whiten=self.whiten,
                           mean=self.q_mu.numpy(),
                           variance_sqrt=self.q_sqrt.numpy())
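# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the library API): the guarded
# block below shows one way to drive SVGP — stochastic optimisation of the
# negative ELBO returned by `objective` over minibatches, followed by a call
# to `get_posterior`. It assumes this package exposes
# `kernels.SquaredExponential` and `likelihoods.Gaussian`, that the model
# inherits `trainable_variables` from `tf.Module`, and that a raw numpy array
# of inducing inputs is accepted (and wrapped by `inducingpoint_wrapper`);
# these names are assumptions about the surrounding package, and because the
# module uses relative imports the sketch only runs in package context
# (e.g. via `python -m`).
if __name__ == "__main__":
    from .. import kernels, likelihoods  # assumed subpackages

    # Toy 1D regression data and a small set of inducing inputs.
    X = np.random.rand(500, 1)
    Y = np.sin(10.0 * X) + 0.1 * np.random.randn(500, 1)
    Z = X[::25].copy()  # 20 inducing inputs

    model = SVGP(kernel=kernels.SquaredExponential(),
                 likelihood=likelihoods.Gaussian(),
                 inducing_variable=Z,
                 num_data=X.shape[0])

    optimizer = tf.optimizers.Adam(learning_rate=0.01)
    dataset = tf.data.Dataset.from_tensor_slices((X, Y)).shuffle(500).batch(50)

    # Minimise the negative ELBO over minibatches; `num_data` above ensures
    # the data term is rescaled to the full dataset inside `elbo`.
    for _ in range(100):
        for batch in dataset:
            with tf.GradientTape() as tape:
                loss = model.objective(batch)
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

    posterior = model.get_posterior((X, Y))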