Revision 48270681afc13081094f7f398a1e194c6b07ba9b authored by vdutor on 03 January 2018, 17:44:53 UTC, committed by Mark van der Wilk on 03 January 2018, 17:44:53 UTC
* Outline of new expectations code. * Quadrature code now uses TensorFlow shape inference. * General expectations work. * Expectations RBF kern, not tested * Add Identity mean function * General unittests for Expectations * Add multipledispatch package to travis * Update tests_expectations * Expectations of mean functions * Mean function uncertain conditional * Uncertain conditional with mean_function. Tested. * Support for Add and Prod kernels and quadrature fallback decorator * Refactor expectations unittests * Psi stats Linear kernel * Split expectations in different files * Expectation Linear kernel and Linear mean function * Remove None's from expectations api * Removed old ekernels framework * Add multipledispatch to setup file * Work on PR feedback, not finished * Addressed PR feedback * Support for pairwise xKxz * Enable expectations unittests * Renamed `TimeseriesGaussian` to `MarkovGaussian` and added tests. * Rename some variable, plus note for later test of <x Kxz>_q. * Update conditionals.py Add comment * Change order of inputs to (feat, kern) * Stef/expectations (#601) * adding gaussmarkov quad * don't override the markvogaussian in the quadrature * can't test * adding external test * quadrature code done and works for MarkovGauss * MarkovGaussian with quad implemented. All tests pass * Shape comments. * Removed superfluous autoflow functions for kernel expectations * Update kernels.py * Update quadrature.py
1 parent 2182bf0
quadrature.py
from __future__ import print_function, absolute_import
import itertools
import numpy as np
import tensorflow as tf
from . import settings
from .core.errors import GPflowError
def hermgauss(n):
    """
    Return the 1D Gauss-Hermite abscissae and weights for `n` evaluation
    points, cast to the float dtype configured in `settings.float_type`.

    :param n: number of Gauss-Hermite points.
    :return: tuple (locations, weights), each a 1D numpy array of length n.
    """
    points, weights = np.polynomial.hermite.hermgauss(n)
    return points.astype(settings.float_type), weights.astype(settings.float_type)
def mvhermgauss(H, D):
    """
    Return the evaluation locations 'xn', and weights 'wn' for a multivariate
    Gauss-Hermite quadrature.

    The outputs can be used to approximate the following type of integral:
    int exp(-x)*f(x) dx ~ sum_i w[i,:]*f(x[i,:])

    :param H: Number of Gauss-Hermite evaluation points (per dimension).
    :param D: Number of input dimensions. Needs to be known at call-time.
    :return: eval_locations 'x' (H**DxD), weights 'w' (H**D)
    """
    gh_x, gh_w = hermgauss(H)
    # Cartesian product of the 1D rule over all D dimensions: H**D points.
    locations = np.array(list(itertools.product(gh_x, repeat=D)))  # H**DxD
    # Weight of each multivariate point is the product of its 1D weights.
    weight_grid = np.array(list(itertools.product(gh_w, repeat=D)))  # H**DxD
    weights = weight_grid.prod(axis=1)  # H**D
    return locations, weights
def mvnquad(func, means, covs, H, Din=None, Dout=None):
    """
    Computes N Gaussian expectation integrals of a single function 'func'
    using Gauss-Hermite quadrature.

    :param func: integrand function. Takes one input of shape ?xD.
    :param means: NxD tensor of Gaussian means.
    :param covs: NxDxD tensor of Gaussian covariances.
    :param H: Number of Gauss-Hermite evaluation points (per dimension).
    :param Din: Number of input dimensions. Needs to be known at call-time;
        if None, it is inferred from the static shape of `means`.
    :param Dout: Number of output dimensions. Defaults to (). Dout is assumed
        to leave out the item index, i.e. func actually maps (?xD)->(?x*Dout).
        If None, it is inferred from the static shape of func's output.
    :return: quadratures (N,*Dout)
    :raises GPflowError: when Din or Dout is None and the corresponding static
        shape is unknown (e.g. when running under autoflow).
    """
    # Figure out input shape information
    if Din is None:
        # `means.shape` is a plain tuple in numpy-like inputs, or a TensorShape
        # of Dimension objects for TF tensors (TF1: `.value` extracts the int).
        Din = means.shape[1] if isinstance(means.shape, tuple) else means.shape[1].value
        if Din is None:
            raise GPflowError("If `Din` is passed as `None`, `means` must have a known shape. "
                              "Running mvnquad in `autoflow` without specifying `Din` and `Dout` "
                              "is problematic. Consider using your own session.")  # pragma: no cover

    xn, wn = mvhermgauss(H, Din)
    N = tf.shape(means)[0]

    # Transform the standard quadrature points based on the Gaussian parameters:
    # x -> sqrt(2) * L * x + mean, where L is the Cholesky factor of the covariance.
    cholXcov = tf.cholesky(covs)  # NxDxD
    Xt = tf.matmul(cholXcov, tf.tile(xn[None, :, :], (N, 1, 1)), transpose_b=True)  # NxDxH**D
    X = 2.0 ** 0.5 * Xt + tf.expand_dims(means, 2)  # NxDxH**D
    Xr = tf.reshape(tf.transpose(X, [2, 0, 1]), (-1, Din))  # (H**D*N)xD

    # Perform quadrature: evaluate func at all transformed points in one batch.
    fevals = func(Xr)
    if Dout is None:
        # Infer output shape (excluding the batch axis) from static shape info;
        # entries may be ints (eager/np) or Dimension objects (TF1 graphs).
        Dout = tuple((d if isinstance(d, int) else d.value) for d in fevals.shape[1:])
        if any(d is None for d in Dout):
            raise GPflowError("If `Dout` is passed as `None`, the output of `func` must have known "
                              "shape. Running mvnquad in `autoflow` without specifying `Din` and `Dout` "
                              "is problematic. Consider using your own session.")  # pragma: no cover

    # Weighted sum over the H**Din quadrature points; pi**(-Din/2) normalizes
    # the Gauss-Hermite weights for a Gaussian (rather than exp(-x^2)) measure.
    fX = tf.reshape(fevals, (H ** Din, N,) + Dout)
    wr = np.reshape(wn * np.pi ** (-Din * 0.5),
                    (-1,) + (1,) * (1 + len(Dout)))
    return tf.reduce_sum(fX * wr, 0)
Computing file changes ...