Revision 48270681afc13081094f7f398a1e194c6b07ba9b authored by vdutor on 03 January 2018, 17:44:53 UTC, committed by Mark van der Wilk on 03 January 2018, 17:44:53 UTC
* Outline of new expectations code.

* Quadrature code now uses TensorFlow shape inference.

* General expectations work.

* Expectations for RBF kernel, not tested

* Add Identity mean function

* General unittests for Expectations

* Add multipledispatch package to travis

* Update tests_expectations

* Expectations of mean functions

* Mean function uncertain conditional

* Uncertain conditional with mean_function. Tested.

* Support for Add and Prod kernels and quadrature fallback decorator

* Refactor expectations unittests

* Psi stats Linear kernel

* Split expectations in different files

* Expectation Linear kernel and Linear mean function

* Remove None values from the expectations API

* Removed old ekernels framework

* Add multipledispatch to setup file

* Work on PR feedback, not finished

* Addressed PR feedback

* Support for pairwise xKxz

* Enable expectations unittests

* Renamed `TimeseriesGaussian` to `MarkovGaussian` and added tests.

* Rename some variables, plus a note for a later test of <x Kxz>_q.

* Update conditionals.py

Add comment

* Change order of inputs to (feat, kern)

* Stef/expectations (#601)

* adding GaussMarkov quadrature

* don't override the MarkovGaussian in the quadrature

* can't test

* adding external test

* quadrature code done and works for MarkovGauss

* MarkovGaussian with quad implemented. All tests pass

* Shape comments.

* Removed superfluous autoflow functions for kernel expectations

* Update kernels.py

* Update quadrature.py
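
The log above sketches the new expectations architecture: closed-form expectation implementations are registered per argument-type signature with multipledispatch, and unregistered combinations fall back to quadrature. A minimal sketch of that dispatch pattern (all names below are illustrative, not GPflow's actual API):

    from multipledispatch import dispatch

    class ProbDist: pass                  # hypothetical distribution tag
    class DiagGaussian(ProbDist): pass    # hypothetical Gaussian marginal
    class SomeKernel: pass                # hypothetical kernel tag

    @dispatch(DiagGaussian, SomeKernel)
    def expectation(p, kern):
        return "analytic psi-statistics"  # closed-form path

    @dispatch(ProbDist, object)
    def expectation(p, kern):
        return "quadrature fallback"      # generic Gauss-Hermite path

    expectation(DiagGaussian(), SomeKernel())  # most specific match: analytic
    expectation(DiagGaussian(), 42)            # no analytic match: quadrature

multipledispatch resolves to the most specific registered signature, so adding a new closed-form expectation is just a matter of registering another function.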
priors.py
# Copyright 2016 Valentine Svensson, James Hensman, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from __future__ import absolute_import
import tensorflow as tf
import numpy as np

from . import densities
from . import settings

from .params import Parameterized
from .core.base import IPrior


class Prior(Parameterized, IPrior):  # pylint: disable=W0223
    pass


class Gaussian(Prior):
    def __init__(self, mu, var):
        Prior.__init__(self)
        self.mu = np.atleast_1d(np.array(mu, settings.float_type))
        self.var = np.atleast_1d(np.array(var, settings.float_type))

    def logp(self, x):
        return tf.reduce_sum(densities.gaussian(x, self.mu, self.var))

    def sample(self, shape=(1,)):
        return self.mu + np.sqrt(self.var) * np.random.randn(*shape)

    def __str__(self):
        return "N("+str(self.mu) + "," + str(self.var) + ")"


class LogNormal(Prior):
    def __init__(self, mu, var):
        Prior.__init__(self)
        self.mu = np.atleast_1d(np.array(mu, settings.float_type))
        self.var = np.atleast_1d(np.array(var, settings.float_type))

    def logp(self, x):
        return tf.reduce_sum(densities.lognormal(x, self.mu, self.var))

    def sample(self, shape=(1,)):
        return np.exp(self.mu + np.sqrt(self.var) * np.random.randn(*shape))

    def __str__(self):
        return "logN("+str(self.mu) + "," + str(self.var) + ")"


class Gamma(Prior):
    def __init__(self, shape, scale):
        Prior.__init__(self)
        self.shape = np.atleast_1d(np.array(shape, settings.float_type))
        self.scale = np.atleast_1d(np.array(scale, settings.float_type))

    def logp(self, x):
        return tf.reduce_sum(densities.gamma(self.shape, self.scale, x))

    def sample(self, shape=(1,)):
        return np.random.gamma(self.shape, self.scale, size=shape)

    def __str__(self):
        return "Ga("+str(self.shape) + "," + str(self.scale) + ")"


class Laplace(Prior):
    def __init__(self, mu, sigma):
        Prior.__init__(self)
        self.mu = np.atleast_1d(np.array(mu, settings.float_type))
        self.sigma = np.atleast_1d(np.array(sigma, settings.float_type))

    def logp(self, x):
        return tf.reduce_sum(densities.laplace(self.mu, self.sigma, x))

    def sample(self, shape=(1,)):
        return np.random.laplace(self.mu, self.sigma, size=shape)

    def __str__(self):
        return "Lap.("+str(self.mu) + "," + str(self.sigma) + ")"


class Beta(Prior):
    def __init__(self, a, b):
        Prior.__init__(self)
        self.a = np.atleast_1d(np.array(a, settings.float_type))
        self.b = np.atleast_1d(np.array(b, settings.float_type))

    def logp(self, x):
        return tf.reduce_sum(densities.beta(self.a, self.b, x))

    def sample(self, shape=(1,)):
        return np.random.beta(self.a, self.b, size=shape)

    def __str__(self):
        return "Beta(" + str(self.a) + "," + str(self.b) + ")"


class Uniform(Prior):
    def __init__(self, lower=0., upper=1.):
        Prior.__init__(self)
        self.log_height = - np.log(upper - lower)
        self.lower, self.upper = lower, upper

    def logp(self, x):
        # Constant density on [lower, upper]; assumes x lies within the
        # support (enforced elsewhere, e.g. by the parameter transform).
        return self.log_height * tf.cast(tf.size(x), settings.float_type)

    def sample(self, shape=(1,)):
        return (self.lower +
                (self.upper - self.lower) * np.random.rand(*shape))

    def __str__(self):
        return "U("+str(self.lower) + "," + str(self.upper) + ")"