https://github.com/cran/MADPop
stanExports_DirichletMultinomial.h (MADPop version 1.1.4)
Tip revision: be291479202d9ca826914b9bf0fe0b8efa26e6c3, authored by Martin Lysy on 22 August 2022, 08:20:12 UTC
// Generated by rstantools.  Do not edit by hand.

/*
    MADPop is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    MADPop is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with MADPop.  If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef MODELS_HPP
#define MODELS_HPP
#define STAN__SERVICES__COMMAND_HPP
#include <rstan/rstaninc.hpp>
// Code generated by Stan version 2.21.0
#include <stan/model/model_header.hpp>
namespace model_DirichletMultinomial_namespace {
using std::istream;
using std::string;
using std::stringstream;
using std::vector;
using stan::io::dump;
using stan::math::lgamma;
using stan::model::prob_grad;
using namespace stan::math;
static int current_statement_begin__;
stan::io::program_reader prog_reader__() {
    stan::io::program_reader reader;
    reader.add_event(0, 0, "start", "model_DirichletMultinomial");
    reader.add_event(87, 85, "end", "model_DirichletMultinomial");
    return reader;
}
template <bool propto, typename T1__>
typename boost::math::tools::promote_args<T1__>::type
dirichlet_multinomial_lpmf(const std::vector<int>& x,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) {
    typedef typename boost::math::tools::promote_args<T1__>::type local_scalar_t__;
    typedef local_scalar_t__ fun_return_scalar_t__;
    const static bool propto__ = true;
    (void) propto__;
        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // suppress unused var warning
    int current_statement_begin__ = -1;
    try {
        {
        current_statement_begin__ = 19;
        local_scalar_t__ ans(DUMMY_VAR__);
        (void) ans;  // dummy to suppress unused var warning
        stan::math::initialize(ans, DUMMY_VAR__);
        stan::math::fill(ans, DUMMY_VAR__);
        current_statement_begin__ = 20;
        stan::math::assign(ans, 0.0);
        current_statement_begin__ = 21;
        for (int ii = 1; ii <= num_elements(x); ++ii) {
            current_statement_begin__ = 22;
            stan::math::assign(ans, (ans + (stan::math::lgamma((get_base1(x, ii, "x", 1) + get_base1(eta, ii, "eta", 1))) - stan::math::lgamma(get_base1(eta, ii, "eta", 1)))));
        }
        current_statement_begin__ = 24;
        return stan::math::promote_scalar<fun_return_scalar_t__>(((ans + stan::math::lgamma(sum(eta))) - stan::math::lgamma((sum(x) + sum(eta)))));
        }
    } catch (const std::exception& e) {
        stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
        // Next line prevents compiler griping about no return
        throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
    }
}
template <typename T1__>
typename boost::math::tools::promote_args<T1__>::type
dirichlet_multinomial_lpmf(const std::vector<int>& x,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) {
    return dirichlet_multinomial_lpmf<false>(x,eta, pstream__);
}
struct dirichlet_multinomial_lpmf_functor__ {
    template <bool propto, typename T1__>
        typename boost::math::tools::promote_args<T1__>::type
    operator()(const std::vector<int>& x,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) const {
        return dirichlet_multinomial_lpmf(x, eta, pstream__);
    }
};
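// --------------------------------------------------------------------------
// Editorial note (not generated by Stan): the templated function above
// evaluates the Dirichlet-multinomial log-pmf up to the multinomial
// coefficient, i.e.
//   log p(x | eta) = lgamma(sum(eta)) - lgamma(sum(x) + sum(eta))
//                    + sum_i [ lgamma(x_i + eta_i) - lgamma(eta_i) ],
// dropping log(n! / prod_i x_i!), which does not depend on eta.  The helper
// below is a minimal plain-double sketch of the same computation, intended
// only as a sanity check against the templated version; the name
// dm_lpmf_check is illustrative and not part of the generated model.
inline double dm_lpmf_check(const std::vector<int>& x,
                            const std::vector<double>& eta) {
    double sum_eta = 0.0, sum_x = 0.0, ans = 0.0;
    for (size_t ii = 0; ii < x.size(); ++ii) {
        // accumulate the per-category terms and the totals in one pass
        ans += stan::math::lgamma(x[ii] + eta[ii]) - stan::math::lgamma(eta[ii]);
        sum_eta += eta[ii];
        sum_x += x[ii];
    }
    return ans + stan::math::lgamma(sum_eta) - stan::math::lgamma(sum_x + sum_eta);
}
// --------------------------------------------------------------------------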
template <bool propto, typename T1__>
typename boost::math::tools::promote_args<T1__>::type
Dirichlet_Multinomial_lpmf(const std::vector<std::vector<int> >& X,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) {
    typedef typename boost::math::tools::promote_args<T1__>::type local_scalar_t__;
    typedef local_scalar_t__ fun_return_scalar_t__;
    const static bool propto__ = true;
    (void) propto__;
        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // suppress unused var warning
    int current_statement_begin__ = -1;
    try {
        {
        current_statement_begin__ = 29;
        validate_non_negative_index("D", "2", 2);
        std::vector<int  > D(2, int(0));
        stan::math::fill(D, std::numeric_limits<int>::min());
        current_statement_begin__ = 30;
        local_scalar_t__ ans(DUMMY_VAR__);
        (void) ans;  // dummy to suppress unused var warning
        stan::math::initialize(ans, DUMMY_VAR__);
        stan::math::fill(ans, DUMMY_VAR__);
        current_statement_begin__ = 31;
        local_scalar_t__ seta(DUMMY_VAR__);
        (void) seta;  // dummy to suppress unused var warning
        stan::math::initialize(seta, DUMMY_VAR__);
        stan::math::fill(seta, DUMMY_VAR__);
        current_statement_begin__ = 32;
        local_scalar_t__ slgeta(DUMMY_VAR__);
        (void) slgeta;  // dummy to suppress unused var warning
        stan::math::initialize(slgeta, DUMMY_VAR__);
        stan::math::fill(slgeta, DUMMY_VAR__);
        current_statement_begin__ = 33;
        stan::math::assign(D, dims(X));
        current_statement_begin__ = 35;
        stan::math::assign(seta, sum(eta));
        current_statement_begin__ = 36;
        stan::math::assign(slgeta, 0.0);
        current_statement_begin__ = 37;
        for (int jj = 1; jj <= get_base1(D, 2, "D", 1); ++jj) {
            current_statement_begin__ = 38;
            stan::math::assign(slgeta, (slgeta + stan::math::lgamma(get_base1(eta, jj, "eta", 1))));
        }
        current_statement_begin__ = 41;
        stan::math::assign(ans, 0.0);
        current_statement_begin__ = 42;
        for (int ii = 1; ii <= get_base1(D, 1, "D", 1); ++ii) {
            current_statement_begin__ = 43;
            for (int jj = 1; jj <= get_base1(D, 2, "D", 1); ++jj) {
                current_statement_begin__ = 44;
                stan::math::assign(ans, (ans + stan::math::lgamma((get_base1(get_base1(X, ii, "X", 1), jj, "X", 2) + get_base1(eta, jj, "eta", 1)))));
            }
            current_statement_begin__ = 46;
            stan::math::assign(ans, (ans - stan::math::lgamma((sum(get_base1(X, ii, "X", 1)) + seta))));
        }
        current_statement_begin__ = 48;
        stan::math::assign(ans, (ans + (get_base1(D, 1, "D", 1) * (stan::math::lgamma(seta) - slgeta))));
        current_statement_begin__ = 49;
        return stan::math::promote_scalar<fun_return_scalar_t__>(ans);
        }
    } catch (const std::exception& e) {
        stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
        // Next line prevents compiler griping about no return
        throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
    }
}
template <typename T1__>
typename boost::math::tools::promote_args<T1__>::type
Dirichlet_Multinomial_lpmf(const std::vector<std::vector<int> >& X,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) {
    return Dirichlet_Multinomial_lpmf<false>(X,eta, pstream__);
}
struct Dirichlet_Multinomial_lpmf_functor__ {
    template <bool propto, typename T1__>
        typename boost::math::tools::promote_args<T1__>::type
    operator()(const std::vector<std::vector<int> >& X,
                               const Eigen::Matrix<T1__, Eigen::Dynamic, 1>& eta, std::ostream* pstream__) const {
        return Dirichlet_Multinomial_lpmf(X, eta, pstream__);
    }
};
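// --------------------------------------------------------------------------
// Editorial note (not generated by Stan): the batched version above equals
// the sum of the single-observation lpmf over the rows of X; the shared terms
// lgamma(sum(eta)) and sum_j lgamma(eta_j) are factored out and added once
// per row as D[1] * (lgamma(seta) - slgeta).  A minimal sketch of that
// identity, reusing the illustrative dm_lpmf_check() helper defined above:
inline double DM_lpmf_check(const std::vector<std::vector<int> >& X,
                            const std::vector<double>& eta) {
    double ans = 0.0;
    for (size_t ii = 0; ii < X.size(); ++ii) {
        ans += dm_lpmf_check(X[ii], eta);  // per-row Dirichlet-multinomial log-pmf
    }
    return ans;
}
// --------------------------------------------------------------------------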
#include <stan_meta_header.hpp>
class model_DirichletMultinomial
  : public stan::model::model_base_crtp<model_DirichletMultinomial> {
private:
        int nG;
        int nL;
        std::vector<std::vector<int> > X;
        int nLrho;
        std::vector<int> iLrho;
public:
    model_DirichletMultinomial(stan::io::var_context& context__,
        std::ostream* pstream__ = 0)
        : model_base_crtp(0) {
        ctor_body(context__, 0, pstream__);
    }
    model_DirichletMultinomial(stan::io::var_context& context__,
        unsigned int random_seed__,
        std::ostream* pstream__ = 0)
        : model_base_crtp(0) {
        ctor_body(context__, random_seed__, pstream__);
    }
    void ctor_body(stan::io::var_context& context__,
                   unsigned int random_seed__,
                   std::ostream* pstream__) {
        typedef double local_scalar_t__;
        boost::ecuyer1988 base_rng__ =
          stan::services::util::create_rng(random_seed__, 0);
        (void) base_rng__;  // suppress unused var warning
        current_statement_begin__ = -1;
        static const char* function__ = "model_DirichletMultinomial_namespace::model_DirichletMultinomial";
        (void) function__;  // dummy to suppress unused var warning
        size_t pos__;
        (void) pos__;  // dummy to suppress unused var warning
        std::vector<int> vals_i__;
        std::vector<double> vals_r__;
        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // suppress unused var warning
        try {
            // initialize data block variables from context__
            current_statement_begin__ = 54;
            context__.validate_dims("data initialization", "nG", "int", context__.to_vec());
            nG = int(0);
            vals_i__ = context__.vals_i("nG");
            pos__ = 0;
            nG = vals_i__[pos__++];
            check_greater_or_equal(function__, "nG", nG, 1);
            current_statement_begin__ = 55;
            context__.validate_dims("data initialization", "nL", "int", context__.to_vec());
            nL = int(0);
            vals_i__ = context__.vals_i("nL");
            pos__ = 0;
            nL = vals_i__[pos__++];
            check_greater_or_equal(function__, "nL", nL, 1);
            current_statement_begin__ = 56;
            validate_non_negative_index("X", "nL", nL);
            validate_non_negative_index("X", "nG", nG);
            context__.validate_dims("data initialization", "X", "int", context__.to_vec(nL,nG));
            X = std::vector<std::vector<int> >(nL, std::vector<int>(nG, int(0)));
            vals_i__ = context__.vals_i("X");
            pos__ = 0;
            size_t X_k_0_max__ = nL;
            size_t X_k_1_max__ = nG;
            for (size_t k_1__ = 0; k_1__ < X_k_1_max__; ++k_1__) {
                for (size_t k_0__ = 0; k_0__ < X_k_0_max__; ++k_0__) {
                    X[k_0__][k_1__] = vals_i__[pos__++];
                }
            }
            size_t X_i_0_max__ = nL;
            size_t X_i_1_max__ = nG;
            for (size_t i_0__ = 0; i_0__ < X_i_0_max__; ++i_0__) {
                for (size_t i_1__ = 0; i_1__ < X_i_1_max__; ++i_1__) {
                    check_greater_or_equal(function__, "X[i_0__][i_1__]", X[i_0__][i_1__], 0);
                }
            }
            current_statement_begin__ = 58;
            context__.validate_dims("data initialization", "nLrho", "int", context__.to_vec());
            nLrho = int(0);
            vals_i__ = context__.vals_i("nLrho");
            pos__ = 0;
            nLrho = vals_i__[pos__++];
            check_greater_or_equal(function__, "nLrho", nLrho, 0);
            check_less_or_equal(function__, "nLrho", nLrho, nL);
            current_statement_begin__ = 59;
            validate_non_negative_index("iLrho", "nLrho", nLrho);
            context__.validate_dims("data initialization", "iLrho", "int", context__.to_vec(nLrho));
            iLrho = std::vector<int>(nLrho, int(0));
            vals_i__ = context__.vals_i("iLrho");
            pos__ = 0;
            size_t iLrho_k_0_max__ = nLrho;
            for (size_t k_0__ = 0; k_0__ < iLrho_k_0_max__; ++k_0__) {
                iLrho[k_0__] = vals_i__[pos__++];
            }
            size_t iLrho_i_0_max__ = nLrho;
            for (size_t i_0__ = 0; i_0__ < iLrho_i_0_max__; ++i_0__) {
                check_greater_or_equal(function__, "iLrho[i_0__]", iLrho[i_0__], 1);
                check_less_or_equal(function__, "iLrho[i_0__]", iLrho[i_0__], nL);
            }
            // initialize transformed data variables
            // execute transformed data statements
            // validate transformed data
            // validate, set parameter ranges
            num_params_r__ = 0U;
            param_ranges_i__.clear();
            current_statement_begin__ = 63;
            validate_non_negative_index("alpha", "nG", nG);
            num_params_r__ += (nG - 1);
            current_statement_begin__ = 64;
            num_params_r__ += 1;
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
            // Next line prevents compiler griping about no return
            throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
        }
    }
    ~model_DirichletMultinomial() { }
    void transform_inits(const stan::io::var_context& context__,
                         std::vector<int>& params_i__,
                         std::vector<double>& params_r__,
                         std::ostream* pstream__) const {
        typedef double local_scalar_t__;
        stan::io::writer<double> writer__(params_r__, params_i__);
        size_t pos__;
        (void) pos__; // dummy call to suppress warning
        std::vector<double> vals_r__;
        std::vector<int> vals_i__;
        current_statement_begin__ = 63;
        if (!(context__.contains_r("alpha")))
            stan::lang::rethrow_located(std::runtime_error(std::string("Variable alpha missing")), current_statement_begin__, prog_reader__());
        vals_r__ = context__.vals_r("alpha");
        pos__ = 0U;
        validate_non_negative_index("alpha", "nG", nG);
        context__.validate_dims("parameter initialization", "alpha", "vector_d", context__.to_vec(nG));
        Eigen::Matrix<double, Eigen::Dynamic, 1> alpha(nG);
        size_t alpha_j_1_max__ = nG;
        for (size_t j_1__ = 0; j_1__ < alpha_j_1_max__; ++j_1__) {
            alpha(j_1__) = vals_r__[pos__++];
        }
        try {
            writer__.simplex_unconstrain(alpha);
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()), current_statement_begin__, prog_reader__());
        }
        current_statement_begin__ = 64;
        if (!(context__.contains_r("eta")))
            stan::lang::rethrow_located(std::runtime_error(std::string("Variable eta missing")), current_statement_begin__, prog_reader__());
        vals_r__ = context__.vals_r("eta");
        pos__ = 0U;
        context__.validate_dims("parameter initialization", "eta", "double", context__.to_vec());
        double eta(0);
        eta = vals_r__[pos__++];
        try {
            writer__.scalar_lb_unconstrain(0, eta);
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(std::runtime_error(std::string("Error transforming variable eta: ") + e.what()), current_statement_begin__, prog_reader__());
        }
        params_r__ = writer__.data_r();
        params_i__ = writer__.data_i();
    }
    void transform_inits(const stan::io::var_context& context,
                         Eigen::Matrix<double, Eigen::Dynamic, 1>& params_r,
                         std::ostream* pstream__) const {
      std::vector<double> params_r_vec;
      std::vector<int> params_i_vec;
      transform_inits(context, params_i_vec, params_r_vec, pstream__);
      params_r.resize(params_r_vec.size());
      for (int i = 0; i < params_r.size(); ++i)
        params_r(i) = params_r_vec[i];
    }
    template <bool propto__, bool jacobian__, typename T__>
    T__ log_prob(std::vector<T__>& params_r__,
                 std::vector<int>& params_i__,
                 std::ostream* pstream__ = 0) const {
        typedef T__ local_scalar_t__;
        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // dummy to suppress unused var warning
        T__ lp__(0.0);
        stan::math::accumulator<T__> lp_accum__;
        try {
            stan::io::reader<local_scalar_t__> in__(params_r__, params_i__);
            // model parameters
            current_statement_begin__ = 63;
            Eigen::Matrix<local_scalar_t__, Eigen::Dynamic, 1> alpha;
            (void) alpha;  // dummy to suppress unused var warning
            if (jacobian__)
                alpha = in__.simplex_constrain(nG, lp__);
            else
                alpha = in__.simplex_constrain(nG);
            current_statement_begin__ = 64;
            local_scalar_t__ eta;
            (void) eta;  // dummy to suppress unused var warning
            if (jacobian__)
                eta = in__.scalar_lb_constrain(0, lp__);
            else
                eta = in__.scalar_lb_constrain(0);
            // model body
            current_statement_begin__ = 70;
            lp_accum__.add(Dirichlet_Multinomial_lpmf<propto__>(X, multiply(eta, alpha), pstream__));
            current_statement_begin__ = 74;
            lp_accum__.add((-(2) * stan::math::log((1 + eta))));
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
            // Next line prevents compiler griping about no return
            throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
        }
        lp_accum__.add(lp__);
        return lp_accum__.sum();
    } // log_prob()
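    // Editorial note (not generated by Stan): reading off the two statements
    // added to lp_accum__ above, the target density of this model is
    //   log p(alpha, eta | X) = Dirichlet_Multinomial_lpmf(X | eta * alpha)
    //                           - 2 * log(1 + eta) + const,
    // i.e. a Dirichlet-multinomial likelihood with concentration vector
    // eta * alpha, an implicitly uniform prior on the simplex alpha, and a
    // prior on eta proportional to (1 + eta)^{-2}.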
    template <bool propto, bool jacobian, typename T_>
    T_ log_prob(Eigen::Matrix<T_,Eigen::Dynamic,1>& params_r,
               std::ostream* pstream = 0) const {
      std::vector<T_> vec_params_r;
      vec_params_r.reserve(params_r.size());
      for (int i = 0; i < params_r.size(); ++i)
        vec_params_r.push_back(params_r(i));
      std::vector<int> vec_params_i;
      return log_prob<propto,jacobian,T_>(vec_params_r, vec_params_i, pstream);
    }
    void get_param_names(std::vector<std::string>& names__) const {
        names__.resize(0);
        names__.push_back("alpha");
        names__.push_back("eta");
        names__.push_back("rho");
    }
    void get_dims(std::vector<std::vector<size_t> >& dimss__) const {
        dimss__.resize(0);
        std::vector<size_t> dims__;
        dims__.resize(0);
        dims__.push_back(nG);
        dimss__.push_back(dims__);
        dims__.resize(0);
        dimss__.push_back(dims__);
        dims__.resize(0);
        dims__.push_back(nLrho);
        dims__.push_back(nG);
        dimss__.push_back(dims__);
    }
    template <typename RNG>
    void write_array(RNG& base_rng__,
                     std::vector<double>& params_r__,
                     std::vector<int>& params_i__,
                     std::vector<double>& vars__,
                     bool include_tparams__ = true,
                     bool include_gqs__ = true,
                     std::ostream* pstream__ = 0) const {
        typedef double local_scalar_t__;
        vars__.resize(0);
        stan::io::reader<local_scalar_t__> in__(params_r__, params_i__);
        static const char* function__ = "model_DirichletMultinomial_namespace::write_array";
        (void) function__;  // dummy to suppress unused var warning
        // read-transform, write parameters
        Eigen::Matrix<double, Eigen::Dynamic, 1> alpha = in__.simplex_constrain(nG);
        size_t alpha_j_1_max__ = nG;
        for (size_t j_1__ = 0; j_1__ < alpha_j_1_max__; ++j_1__) {
            vars__.push_back(alpha(j_1__));
        }
        double eta = in__.scalar_lb_constrain(0);
        vars__.push_back(eta);
        double lp__ = 0.0;
        (void) lp__;  // dummy to suppress unused var warning
        stan::math::accumulator<double> lp_accum__;
        local_scalar_t__ DUMMY_VAR__(std::numeric_limits<double>::quiet_NaN());
        (void) DUMMY_VAR__;  // suppress unused var warning
        if (!include_tparams__ && !include_gqs__) return;
        try {
            if (!include_gqs__ && !include_tparams__) return;
            if (!include_gqs__) return;
            // declare and define generated quantities
            current_statement_begin__ = 79;
            validate_non_negative_index("rho", "nG", nG);
            validate_non_negative_index("rho", "nLrho", nLrho);
            std::vector<Eigen::Matrix<double, Eigen::Dynamic, 1> > rho(nLrho, Eigen::Matrix<double, Eigen::Dynamic, 1>(nG));
            stan::math::initialize(rho, DUMMY_VAR__);
            stan::math::fill(rho, DUMMY_VAR__);
            // generated quantities statements
            current_statement_begin__ = 80;
            if (as_bool(logical_gt(nLrho, 0))) {
                current_statement_begin__ = 81;
                for (int ii = 1; ii <= nLrho; ++ii) {
                    current_statement_begin__ = 82;
                    stan::model::assign(rho, 
                                stan::model::cons_list(stan::model::index_uni(ii), stan::model::nil_index_list()), 
                                dirichlet_rng(add(to_vector(get_base1(X, get_base1(iLrho, ii, "iLrho", 1), "X", 1)), multiply(eta, alpha)), base_rng__), 
                                "assigning variable rho");
                }
            }
            // validate, write generated quantities
            current_statement_begin__ = 79;
            size_t rho_i_0_max__ = nLrho;
            for (size_t i_0__ = 0; i_0__ < rho_i_0_max__; ++i_0__) {
                stan::math::check_simplex(function__, "rho[i_0__]", rho[i_0__]);
            }
            size_t rho_j_1_max__ = nG;
            size_t rho_k_0_max__ = nLrho;
            for (size_t j_1__ = 0; j_1__ < rho_j_1_max__; ++j_1__) {
                for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) {
                    vars__.push_back(rho[k_0__](j_1__));
                }
            }
        } catch (const std::exception& e) {
            stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__());
            // Next line prevents compiler griping about no return
            throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***");
        }
    }
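    // Editorial note (not generated by Stan): the generated quantities block
    // above draws each rho[ii] from a Dirichlet distribution with parameter
    // X[iLrho[ii]] + eta * alpha, i.e. the conjugate conditional posterior of
    // the category-probability vector for population iLrho[ii] given the
    // current draw of (alpha, eta).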
    template <typename RNG>
    void write_array(RNG& base_rng,
                     Eigen::Matrix<double,Eigen::Dynamic,1>& params_r,
                     Eigen::Matrix<double,Eigen::Dynamic,1>& vars,
                     bool include_tparams = true,
                     bool include_gqs = true,
                     std::ostream* pstream = 0) const {
      std::vector<double> params_r_vec(params_r.size());
      for (int i = 0; i < params_r.size(); ++i)
        params_r_vec[i] = params_r(i);
      std::vector<double> vars_vec;
      std::vector<int> params_i_vec;
      write_array(base_rng, params_r_vec, params_i_vec, vars_vec, include_tparams, include_gqs, pstream);
      vars.resize(vars_vec.size());
      for (int i = 0; i < vars.size(); ++i)
        vars(i) = vars_vec[i];
    }
    std::string model_name() const {
        return "model_DirichletMultinomial";
    }
    void constrained_param_names(std::vector<std::string>& param_names__,
                                 bool include_tparams__ = true,
                                 bool include_gqs__ = true) const {
        std::stringstream param_name_stream__;
        size_t alpha_j_1_max__ = nG;
        for (size_t j_1__ = 0; j_1__ < alpha_j_1_max__; ++j_1__) {
            param_name_stream__.str(std::string());
            param_name_stream__ << "alpha" << '.' << j_1__ + 1;
            param_names__.push_back(param_name_stream__.str());
        }
        param_name_stream__.str(std::string());
        param_name_stream__ << "eta";
        param_names__.push_back(param_name_stream__.str());
        if (!include_gqs__ && !include_tparams__) return;
        if (include_tparams__) {
        }
        if (!include_gqs__) return;
        size_t rho_j_1_max__ = nG;
        size_t rho_k_0_max__ = nLrho;
        for (size_t j_1__ = 0; j_1__ < rho_j_1_max__; ++j_1__) {
            for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) {
                param_name_stream__.str(std::string());
                param_name_stream__ << "rho" << '.' << k_0__ + 1 << '.' << j_1__ + 1;
                param_names__.push_back(param_name_stream__.str());
            }
        }
    }
    void unconstrained_param_names(std::vector<std::string>& param_names__,
                                   bool include_tparams__ = true,
                                   bool include_gqs__ = true) const {
        std::stringstream param_name_stream__;
        size_t alpha_j_1_max__ = (nG - 1);
        for (size_t j_1__ = 0; j_1__ < alpha_j_1_max__; ++j_1__) {
            param_name_stream__.str(std::string());
            param_name_stream__ << "alpha" << '.' << j_1__ + 1;
            param_names__.push_back(param_name_stream__.str());
        }
        param_name_stream__.str(std::string());
        param_name_stream__ << "eta";
        param_names__.push_back(param_name_stream__.str());
        if (!include_gqs__ && !include_tparams__) return;
        if (include_tparams__) {
        }
        if (!include_gqs__) return;
        size_t rho_j_1_max__ = (nG - 1);
        size_t rho_k_0_max__ = nLrho;
        for (size_t j_1__ = 0; j_1__ < rho_j_1_max__; ++j_1__) {
            for (size_t k_0__ = 0; k_0__ < rho_k_0_max__; ++k_0__) {
                param_name_stream__.str(std::string());
                param_name_stream__ << "rho" << '.' << k_0__ + 1 << '.' << j_1__ + 1;
                param_names__.push_back(param_name_stream__.str());
            }
        }
    }
}; // model
}  // namespace
typedef model_DirichletMultinomial_namespace::model_DirichletMultinomial stan_model;
#ifndef USING_R
stan::model::model_base& new_model(
        stan::io::var_context& data_context,
        unsigned int seed,
        std::ostream* msg_stream) {
  stan_model* m = new stan_model(data_context, seed, msg_stream);
  return *m;
}
#endif
#endif