% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weighted_posteriors.R
\name{weighted_posteriors}
\alias{weighted_posteriors}
\alias{weighted_posteriors.data.frame}
\alias{weighted_posteriors.stanreg}
\alias{weighted_posteriors.brmsfit}
\alias{weighted_posteriors.blavaan}
\alias{weighted_posteriors.BFBayesFactor}
\title{Generate posterior distributions weighted across models}
\usage{
weighted_posteriors(..., prior_odds = NULL, missing = 0, verbose = TRUE)

\method{weighted_posteriors}{data.frame}(..., prior_odds = NULL, missing = 0, verbose = TRUE)

\method{weighted_posteriors}{stanreg}(
  ...,
  prior_odds = NULL,
  missing = 0,
  verbose = TRUE,
  effects = c("fixed", "random", "all"),
  component = c("conditional", "zi", "zero_inflated", "all"),
  parameters = NULL
)

\method{weighted_posteriors}{brmsfit}(
  ...,
  prior_odds = NULL,
  missing = 0,
  verbose = TRUE,
  effects = c("fixed", "random", "all"),
  component = c("conditional", "zi", "zero_inflated", "all"),
  parameters = NULL
)

\method{weighted_posteriors}{blavaan}(
  ...,
  prior_odds = NULL,
  missing = 0,
  verbose = TRUE,
  effects = c("fixed", "random", "all"),
  component = c("conditional", "zi", "zero_inflated", "all"),
  parameters = NULL
)

\method{weighted_posteriors}{BFBayesFactor}(
  ...,
  prior_odds = NULL,
  missing = 0,
  verbose = TRUE,
  iterations = 4000
)
}
\arguments{
\item{...}{Fitted models (see details), all fit on the same data, or a single
\code{BFBayesFactor} object.}

\item{prior_odds}{Optional vector of prior odds for the models compared to
the first model (or the denominator, for \code{BFBayesFactor} objects). For
\code{data.frame}s, this will be used as the basis of weighting.}

\item{missing}{An optional numeric value to use if a model does not contain a
parameter that appears in other models. Defaults to 0.}

\item{verbose}{Toggle warnings on or off.}

\item{effects}{Should results for fixed effects, random effects, or both be
returned? Only applies to mixed models. May be abbreviated.}

\item{component}{Should results be returned for all parameters, for the
conditional model only, or for the zero-inflated part of the model only? May
be abbreviated. Only applies to \pkg{brms} models.}

\item{parameters}{Regular expression pattern that describes the parameters
that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are
filtered by default, so only parameters that typically appear in the
\code{summary()} are returned. Use \code{parameters} to select specific parameters
for the output.}

\item{iterations}{For \code{BayesFactor} models, how many posterior samples to draw.}
}
\value{
A data frame with posterior distributions (weighted across models).
}
\description{
Extract posterior samples of parameters, weighted across models. Weighting is
done by comparing posterior model probabilities, via \code{\link[=bayesfactor_models]{bayesfactor_models()}}.
}
\details{
Note that across models some parameters might play different roles. For
example, the parameter \code{A} plays a different role in the model \code{Y ~ A + B}
(where it is a main effect) than in the model \code{Y ~ A + B + A:B} (where it
is a simple effect). In many cases, centering the predictors (mean-subtracting
continuous variables, and using effects coding via \code{contr.sum} or
orthonormal coding via \code{\link{contr.equalprior_pairs}} for factors) can
reduce this issue. In any case, you should be mindful of it.
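
As a minimal, purely illustrative sketch (the data and variable choices here
are hypothetical, not taken from this documentation), predictors might be
prepared like this before fitting the competing models:

\preformatted{
dat <- mtcars
# mean-center a continuous predictor
dat$carb_c <- dat$carb - mean(dat$carb)
# sum-to-zero (effects) coding for a factor;
# the same coding must be used in every compared model
dat$cyl <- factor(dat$cyl)
contrasts(dat$cyl) <- contr.sum(nlevels(dat$cyl))
}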

See \code{\link[=bayesfactor_models]{bayesfactor_models()}} details for more info on passed models.
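
Roughly speaking (this is a sketch of the underlying idea rather than a
description of the exact implementation), each model's weight is its posterior
model probability: its Bayes factor against a common reference model,
multiplied by its prior odds and normalized across all compared models,
\deqn{P(M_i \mid D) = \frac{BF_i \times \mathrm{PriorOdds}_i}{\sum_j BF_j \times \mathrm{PriorOdds}_j}}{P(Mi | D) = (BFi * PriorOddsi) / sum_j (BFj * PriorOddsj)}
Posterior draws are then pooled across models in proportion to these
probabilities.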

Note that for \code{BayesFactor} models, posterior samples cannot be generated
from intercept-only models.

This function is similar in purpose to \code{brms::posterior_average()}.
}
\note{
For \verb{BayesFactor < 0.9.12-4.3}, the resulting data frame might in some
instances contain duplicate columns for random effects.
}
\examples{
\donttest{
if (require("rstanarm") && require("see") && interactive()) {
  stan_m0 <- suppressWarnings(stan_glm(extra ~ 1,
    data = sleep,
    family = gaussian(),
    refresh = 0,
    diagnostic_file = file.path(tempdir(), "df0.csv")
  ))

  stan_m1 <- suppressWarnings(stan_glm(extra ~ group,
    data = sleep,
    family = gaussian(),
    refresh = 0,
    diagnostic_file = file.path(tempdir(), "df1.csv")
  ))

  res <- weighted_posteriors(stan_m0, stan_m1, verbose = FALSE)

  plot(eti(res))
}

## With BayesFactor
if (require("BayesFactor")) {
  extra_sleep <- ttestBF(formula = extra ~ group, data = sleep)

  wp <- weighted_posteriors(extra_sleep, verbose = FALSE)

  describe_posterior(extra_sleep, test = NULL, verbose = FALSE)
  # also considers the null
  describe_posterior(wp$delta, test = NULL, verbose = FALSE)
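
  # A purely illustrative extension (not part of the original example): if
  # prior knowledge favored the alternative over the null by 2 to 1, prior
  # odds relative to the denominator model could be supplied directly:
  wp2 <- weighted_posteriors(extra_sleep, prior_odds = 2, verbose = FALSE)
  describe_posterior(wp2$delta, test = NULL, verbose = FALSE)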
}


## weighted prediction distributions via data.frames
if (require("rstanarm") && interactive()) {
  m0 <- suppressWarnings(stan_glm(
    mpg ~ 1,
    data = mtcars,
    family = gaussian(),
    diagnostic_file = file.path(tempdir(), "df0.csv"),
    refresh = 0
  ))

  m1 <- suppressWarnings(stan_glm(
    mpg ~ carb,
    data = mtcars,
    family = gaussian(),
    diagnostic_file = file.path(tempdir(), "df1.csv"),
    refresh = 0
  ))

  # Predictions:
  pred_m0 <- data.frame(posterior_predict(m0))
  pred_m1 <- data.frame(posterior_predict(m1))

  BFmods <- bayesfactor_models(m0, m1, verbose = FALSE)

  wp <- weighted_posteriors(
    pred_m0, pred_m1,
    prior_odds = as.numeric(BFmods)[2],
    verbose = FALSE
  )

  # look at first 5 prediction intervals
  hdi(pred_m0[1:5])
  hdi(pred_m1[1:5])
  hdi(wp[1:5]) # between, but closer to pred_m1
}
}
}
\references{
\itemize{
\item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via
orthogonalized model mixing. Journal of the American Statistical
Association, 91(435), 1197-1208.

\item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E.
(2019, March 25). A conceptual introduction to Bayesian Model Averaging.
\doi{10.31234/osf.io/wgb64}

\item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian
inference for psychology, part IV: Parameter estimation and Bayes factors.
Psychonomic bulletin & review, 25(1), 102-113.

\item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers,
E. J. (2019). A cautionary note on estimating effect size.
}
}
\seealso{
\code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}} for Bayesian model averaging.
}