#include <RcppArmadillo.h> // headers inferred from usage below
#include <vector>
#include <map>
#include "c_optim.h"

#ifdef DO_PROF
#include <gperftools/profiler.h>
#include <ctime>
#include <sstream>
#include <iostream>
#include <iomanip>
#endif

namespace rstpm2 {

  // import namespaces
  using namespace Rcpp;
  using namespace arma;
  // typedefs
  typedef bool constraintfn(int, double *, void *);
  // enums
  enum IntervalCensoring {RightCensored, ExactTime, LeftCensored, IntervalCensored};
  // structs
  struct li_constraint { vec li; double constraint; };
  struct gradli_constraint { mat gradli; mat constraint; };
  li_constraint operator+(li_constraint const &left, li_constraint const &right) {
    li_constraint out = {left.li+right.li, left.constraint+right.constraint};
    return out;
  }
  gradli_constraint operator+(gradli_constraint const &left, gradli_constraint const &right) {
    gradli_constraint out = {left.gradli+right.gradli, left.constraint+right.constraint};
    return out;
  }
  // constants
  const double log2pi = std::log(2.0 * M_PI);
  // Hadamard element-wise multiplication for the _columns_ of a matrix with a vector
  mat rmult(mat const &m, vec const &v) {
    mat out(m);
    out.each_col() %= v;
    return out;
  }
  mat lmult(vec const &v, mat const &m) { return rmult(m, v); }
  // print utilities
  void Rprint(NumericMatrix const & m) {
    for (int i=0; i<m.nrow(); ++i) {
      for (int j=0; j<m.ncol(); ++j)
        Rprintf("%f ", m(i,j));
      Rprintf("\n");
    }
  }
  /* ... further Rprint overloads (NumericVector, vec, rowvec, uvec, mat, cube)
     and pnorm01() elided ... */
  // elementwise dnorm(x)/pnorm(x) (inverse Mills ratio), computed on the log
  // scale in the left tail for numerical stability (function name uncertain)
  vec dnorm01_pnorm01(vec const &x) {
    vec out(x.size());
    double const *xi = x.begin();
    for (double &o : out) {
      double const xv = *xi++;
      if (xv > -10) {
        double const dv = R::dnorm4(xv, 0, 1, 0L),
                     pv = R::pnorm5(xv, 0, 1, 1L, 0L);
        o = dv / pv;
      } else {
        double const log_dv = R::dnorm4(xv, 0, 1, 1L),
                     log_pv = R::pnorm5(xv, 0, 1, 1L, 1L);
        o = std::exp(log_dv - log_pv);
      }
    }
    return out;
  }
  vec qnorm01(vec const &x) {
    vec out(x.size());
    double const *xi = x.begin();
    for(double &o : out)
      o = R::qnorm5(*xi++, 0, 1, true, false);
    return out;
  }
  vec dnorm01(vec const &x) {
    vec out(x.size());
    double const *xi = x.begin();
    for(double &o : out)
      o = R::dnorm4(*xi++, 0, 1, false);
    return out;
  }
  // we could use templates for the following...
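  // Usage sketch (illustrative only): rmult(m, v) scales row i of m by v(i),
  // i.e. it computes diag(v) * m without forming the diagonal matrix. For a
  // design matrix X and a weight vector w:
  //
  //   mat X(3, 2, fill::ones);
  //   vec w = {1.0, 2.0, 3.0};
  //   mat WX = rmult(X, w);  // row i of X multiplied by w(i)
  //
  // lmult(v, m) is the same operation with the argument order reversed.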
  vec logit(vec const & p) { return log(p/(1-p)); }
  vec expit(vec const & x) { return 1/(1+exp(-x)); }
  double logit(double p) { return log(p/(1-p)); }
  double expit(double x) { return 1/(1+exp(-x)); }
  // utility for fast ragged sums
  RcppExport SEXP tapplySum(SEXP s_y, SEXP s_group) {
    NumericVector y(s_y);
    NumericVector group(s_group);
    NumericVector::iterator iy, igroup;
    std::map<double,double> out;
    for (iy = y.begin(), igroup = group.begin(); iy != y.end(); ++iy, ++igroup) {
      out[*igroup] += *iy;
    }
    return wrap(out);
  }
  // https://github.com/RcppCore/rcpp-gallery/issues/123#issue-553642056
  arma::vec dmvnrm_arma(arma::mat const &x,
                        arma::rowvec const &mean,
                        arma::mat const &sigma,
                        bool const logd = false) {
    int n = x.n_rows;
    int xdim = x.n_cols;
    arma::vec out(n);
    arma::mat const rooti = arma::inv(trimatu(arma::chol(sigma)));
    double const rootisum = arma::sum(log(rooti.diag())),
      constants = -xdim/2.0 * log2pi,
      other_terms = rootisum + constants;
    arma::rowvec z;
    for (int i=0; i < n; i++) {
      z = (x.row(i) - mean) * rooti;
      out(i) = other_terms - 0.5 * arma::dot(z, z);
    }
    if (logd == false) out = exp(out);
    return(out);
  }
  double dmvnrm_arma(arma::vec const &x,
                     arma::vec const &mean,
                     arma::mat const &sigma,
                     bool const logd = false) {
    int xdim = x.n_elem;
    arma::mat const rooti = arma::inv(trimatu(arma::chol(sigma)));
    double const rootisum = arma::sum(log(rooti.diag())),
      constants = -xdim/2.0 * log2pi,
      other_terms = rootisum + constants;
    auto z = (x - mean).t() * rooti;
    double out = other_terms - 0.5 * arma::dot(z, z);
    if (logd == false) out = exp(out);
    return(out);
  }
  class Link {
  public:
    virtual vec link(vec const &S) const = 0;
    virtual vec ilink(vec const &x) const = 0;
    virtual vec h(vec const &eta, vec const &etaD) const = 0;
    virtual vec H(vec const &eta) const = 0;
    virtual mat gradH(vec const &eta, mat const &X) const = 0;
    virtual mat gradh(vec const &eta, vec const &etaD, mat const &X, mat const &XD) const = 0;
    virtual ~Link() { }
  };
  // Various link objects
  class LogLogLink final : public Link {
  public:
    vec link(vec const &S) const { return log(-log(S)); }
    vec ilink(vec const &x) const { return exp(-exp(x)); }
    vec h(vec const &eta, vec const &etaD) const { return etaD % exp(eta); }
    vec H(vec const &eta) const { return exp(eta); }
    mat gradH(vec const &eta, mat const &X) const { return rmult(X,exp(eta)); }
    mat gradh(vec const &eta, vec const &etaD, mat const &X, mat const &XD) const {
      return rmult(XD, exp(eta)) + rmult(X, etaD % exp(eta));
    }
    cube hessianH(vec const &beta, mat const &X) const {
      cube c(beta.size(), beta.size(), X.n_rows);
      // (loop body reconstructed) for H = exp(eta), the Hessian slice for
      // row i is exp(eta_i) * x_i x_i'
      for (size_t i=0; i<X.n_rows; ++i)
        c.slice(i) = X.row(i).t() * X.row(i) * exp(dot(X.row(i), beta));
      return c;
    }
    /* ... hessianh() and the remainder of this class elided ... */
  };
  class ArandaOrdazLink final : public Link {
  public:
    /* ... link(), ilink(), h(), H(), gradH() and gradh() elided ... */
    ArandaOrdazLink(SEXP sexp) {
      List args = as<List>(sexp);
      thetaAO = as<double>(args["thetaAO"]);
    }
    double thetaAO;
  };
  // Useful relationship: d/dx expit(x)=expit(x)*expit(-x)
  class LogitLink final : public Link {
  public:
    vec link(vec const &S) const { return -logit(S); }
    vec ilink(vec const &x) const { return expit(-x); }
    // vec h(vec eta, vec etaD) { return etaD % exp(eta) % expit(-eta); }
    vec h(vec const &eta, vec const &etaD) const { return etaD % expit(eta); }
    vec H(vec const &eta) const { return -log(expit(-eta)); }
    mat gradH(vec const &eta, mat const &X) const { return rmult(X,expit(eta)); }
    mat gradh(vec const &eta, vec const &etaD, mat const &X, mat const &XD) const {
      // return rmult(X, etaD % exp(eta) % expit(-eta)) -
      //   rmult(X, exp(2*eta) % etaD % expit(-eta) % expit(-eta)) +
      //   rmult(XD, exp(eta) % expit(-eta));
      return rmult(XD, expit(eta)) + rmult(X, expit(eta) % expit(-eta) % etaD);
    }
    cube hessianH(vec const &beta, mat const &X) const {
      cube c(beta.size(), beta.size(), X.n_rows);
      // (loop body reconstructed) gradH row is expit(eta_i)*x_i, and
      // d expit(eta)/d eta = expit(eta)*expit(-eta)
      for (size_t i=0; i<X.n_rows; ++i) {
        double etai = dot(X.row(i), beta);
        c.slice(i) = X.row(i).t() * X.row(i) * expit(etai) * expit(-etai);
      }
      return c;
    }
    /* ... hessianh() and the remainder of this class elided ... */
  };
  /* ... ProbitLink and LogLink (additive hazards) class definitions elided ... */
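  // Note on the Link contract (sketch): a Link maps survival to the linear
  // predictor, eta = link(S(t)). The cumulative hazard is H(eta) and, by the
  // chain rule with etaD = d eta/d t, the hazard is h = H'(eta) * etaD.
  // For LogLogLink (proportional hazards), H(eta) = exp(eta), hence
  // h = etaD % exp(eta); differentiating with respect to beta (with
  // eta = X*beta and etaD = XD*beta) gives the gradH/gradh members above:
  //
  //   gradH = X scaled by exp(eta)          // d H / d beta
  //   gradh = XD scaled by exp(eta)
  //         + X scaled by etaD % exp(eta)   // product rule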
  template <class T>
  double optimfunction(int n, double * beta, void * void_obj) {
    T * obj = static_cast<T*>(void_obj);
    vec coef(beta, n);
    double value = obj->objective(coef % obj->parscale);
    if (obj->bfgs.trace > 1) {
      Rprintf("beta="); Rprint(coef);
      Rprintf("objective=%.10g\n", value);
    };
    // R_CheckUserInterrupt();  /* be polite -- did the user hit ctrl-C? */
    return value;
  }
  template <class T>
  void optimgradient(int n, double * beta, double * grad, void * void_obj) {
    T * obj = static_cast<T*>(void_obj);
    vec coef(beta, n);
    if (obj->bfgs.trace > 1) { Rprintf("beta="); Rprint(coef); }
    if (obj->bfgs.trace > 2) { Rprintf("parscale="); Rprint(obj->parscale); }
    vec gr = obj->gradient(coef % obj->parscale);
    if (obj->bfgs.trace > 1) { Rprintf("gradient="); Rprint(gr); }
    // if (obj->bfgs.trace > 2) {
    //   Rprintf("fdgradient="); Rprint(obj->fdgradient(coef % obj->parscale));
    // }
    for (int i=0; i<n; ++i) grad[i] = gr[i];
  }
  template <class T>
  void optimfunction_nlm(int n, double * beta, double * f, void * void_obj) {
    T * obj = static_cast<T*>(void_obj);
    vec coef(beta, n);
    *f = obj->objective(coef % obj->parscale);
  }
  class BFGS2 : public BFGS {
  public:
    NumericMatrix calc_hessian(optimgr gr, void * ex) {
      if (parscale.size()==0)
        REprintf("parscale is not defined for BFGS2::calc_hessian.\n");
      int n = coef.size();
      NumericVector df1(n);
      NumericVector df2(n);
      NumericMatrix hess(n,n);
      double tmp;
      double eps = 1.0e-6; // step size (reconstructed)
      // central finite differences of the gradient (loop reconstructed)
      for(int i=0; i<n; ++i) {
        tmp = coef[i];
        coef[i] = tmp + eps;
        gr(n, &coef[0], &df1[0], ex);
        coef[i] = tmp - eps;
        gr(n, &coef[0], &df2[0], ex);
        for(int j=0; j<n; ++j)
          hess(i,j) = (df1[j] - df2[j]) / (2.0*eps);
        coef[i] = tmp;
      }
      return hess;
    }
    vec parscale;
  };
  class NelderMead2 : public NelderMead {
  public:
    NumericMatrix calc_hessian(optimfn fn, void * ex, int debug = 0) {
      if (parscale.size()==0)
        REprintf("parscale is not defined for NelderMead2::calc_hessian.\n");
      if (debug>1) Rprintf("In NelderMead2->calc_hessian()...\n");
      int n = coef.size();
      NumericMatrix hess(n,n);
      double tmpi,tmpj,f1,f0,fm1,hi,hj,fij,fimj,fmij,fmimj;
      f0 = fn(n,&coef[0],ex);
      // function-value finite differences (loop reconstructed)
      for(int i=0; i<n; ++i) {
        tmpi = coef[i];
        hi = 1.0e-4*(1.0+std::abs(tmpi)); // step size (reconstructed)
        coef[i] = tmpi + hi; f1 = fn(n,&coef[0],ex);
        coef[i] = tmpi - hi; fm1 = fn(n,&coef[0],ex);
        hess(i,i) = (f1 - 2.0*f0 + fm1)/(hi*hi);
        coef[i] = tmpi;
        for(int j=i+1; j<n; ++j) {
          tmpj = coef[j];
          hj = 1.0e-4*(1.0+std::abs(tmpj));
          coef[i] = tmpi + hi; coef[j] = tmpj + hj; fij   = fn(n,&coef[0],ex);
          coef[j] = tmpj - hj;                      fimj  = fn(n,&coef[0],ex);
          coef[i] = tmpi - hi; coef[j] = tmpj + hj; fmij  = fn(n,&coef[0],ex);
          coef[j] = tmpj - hj;                      fmimj = fn(n,&coef[0],ex);
          hess(i,j) = hess(j,i) = (fij - fimj - fmij + fmimj)/(4.0*hi*hj);
          coef[i] = tmpi; coef[j] = tmpj;
        }
      }
      if (debug>1) Rprint(hess);
      return hess;
    }
    vec parscale;
  };
  class Nlm2 : public Nlm {
  public:
    NumericMatrix calc_hessian(fcn_p fn, void * ex, int debug = 0) {
      if (parscale.size()==0)
        REprintf("parscale is not defined for Nlm2::calc_hessian.");
      int n = coef.size();
      NumericMatrix hess(n,n);
      double tmpi,tmpj,f1,f0,fm1,hi,hj,fij,fimj,fmij,fmimj;
      fn(n,&coef[0],&f0,ex);
      // function-value finite differences (loop reconstructed)
      for(int i=0; i<n; ++i) {
        tmpi = coef[i];
        hi = 1.0e-4*(1.0+std::abs(tmpi)); // step size (reconstructed)
        coef[i] = tmpi + hi; fn(n,&coef[0],&f1,ex);
        coef[i] = tmpi - hi; fn(n,&coef[0],&fm1,ex);
        hess(i,i) = (f1 - 2.0*f0 + fm1)/(hi*hi);
        coef[i] = tmpi;
        for(int j=i+1; j<n; ++j) {
          tmpj = coef[j];
          hj = 1.0e-4*(1.0+std::abs(tmpj));
          coef[i] = tmpi + hi; coef[j] = tmpj + hj; fn(n,&coef[0],&fij,ex);
          coef[j] = tmpj - hj;                      fn(n,&coef[0],&fimj,ex);
          coef[i] = tmpi - hi; coef[j] = tmpj + hj; fn(n,&coef[0],&fmij,ex);
          coef[j] = tmpj - hj;                      fn(n,&coef[0],&fmimj,ex);
          hess(i,j) = hess(j,i) = (fij - fimj - fmij + fmimj)/(4.0*hi*hj);
          coef[i] = tmpi; coef[j] = tmpj;
        }
      }
      return hess;
    }
    vec parscale;
  };
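  // The OPTIM_FUNCTIONS macro below injects the constrained-optimisation
  // drivers into each model class. The scheme: optimise the penalised
  // negative log-likelihood, check feasible() (non-negative hazards and
  // cumulative hazards at the optimum), and, if the constraint is violated,
  // double the penalty weight kappa and re-optimise, up to a maximum
  // (1.0e3 or maxkappa). As a sketch, one iteration behaves like
  //   minimise  -loglik(beta) + kappa/2 * sum(pmin(h,0)^2 + pmin(H,0)^2)
  // so the quadratic penalty only bites where h or H go negative.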
#define OPTIM_FUNCTIONS                                                 \
  void optimWithConstraintBFGS(NumericVector init) {                    \
    bool satisfied;                                                     \
    this->bfgs.coef = init;                                             \
    if (this->robust_initial) {                                         \
      optimNoHessianNM(this->bfgs.coef,50);                             \
    }                                                                   \
    this->kappa = this->kappa_init;                                     \
    do {                                                                \
      this->bfgs.optim(&optimfunction<This>, &optimgradient<This>, this->bfgs.coef, (void *) this); \
      vec vcoef(&this->bfgs.coef[0],this->n);                           \
      satisfied = this->feasible(vcoef % this->parscale);               \
      if (!satisfied) this->kappa *= 2.0;                               \
    } while ((!satisfied) && this->kappa < 1.0e3);                      \
    if (this->bfgs.trace > 0 && this->kappa>1.0) Rprintf("kappa=%f\n",this->kappa); \
    this->bfgs.hessian = this->bfgs.calc_hessian(&optimgradient<This>, (void *) this); \
  }                                                                     \
  void optimNoHessianNM(NumericVector init, int maxit = 50) {           \
    NelderMead2 nm;                                                     \
    nm.hessianp = false;                                                \
    nm.parscale = this->parscale;                                       \
    nm.maxit = maxit;                                                   \
    nm.optim(&optimfunction<This>, init, (void *) this);                \
    this->bfgs.coef = nm.coef;                                          \
  }                                                                     \
  void optimWithConstraintNM(NumericVector init) {                      \
    bool satisfied;                                                     \
    NelderMead2 nm;                                                     \
    nm.hessianp = false;                                                \
    nm.parscale = this->parscale;                                       \
    this->kappa = this->kappa_init;                                     \
    do {                                                                \
      nm.optim(&optimfunction<This>, init, (void *) this);              \
      vec vcoef(&nm.coef[0],this->n);                                   \
      satisfied = this->feasible(vcoef % this->parscale);               \
      if (!satisfied) this->kappa *= 2.0;                               \
    } while ((!satisfied) && this->kappa < this->maxkappa);             \
    if (this->bfgs.trace > 1) Rprintf("Calculating hessian...\n");      \
    nm.hessian = nm.calc_hessian(&optimfunction<This>, (void *) this, this->bfgs.trace); \
    this->bfgs.coef = nm.coef;                                          \
    this->bfgs.hessian = nm.hessian;                                    \
  }                                                                     \
  void optimWithConstraintNlm(NumericVector init) {                     \
    bool satisfied;                                                     \
    Nlm2 nm;                                                            \
    nm.gradtl = nm.steptl = this->reltol;                               \
    nm.parscale = this->parscale;                                       \
    this->kappa = this->kappa_init;                                     \
    do {                                                                \
      nm.optim(&optimfunction_nlm<This>, init, (void *) this);          \
      vec vcoef(&nm.coef[0],this->n);                                   \
      satisfied = this->feasible(vcoef % this->parscale);               \
      if (!satisfied) this->kappa *= 2.0;                               \
    } while ((!satisfied) && this->kappa < 1.0e3);                      \
    if (this->bfgs.trace > 0 && this->kappa>1.0) Rprintf("kappa=%f\n",this->kappa); \
    nm.hessian = nm.calc_hessian(&optimfunction_nlm<This>, (void *) this); \
    this->bfgs.coef = nm.coef;                                          \
    this->bfgs.hessian = nm.hessian;                                    \
  }                                                                     \
  void optimWithConstraint(NumericVector init) {                        \
    if (this->bfgs.trace > 0) Rprintf("Starting optimization\n");       \
    if (this->optimiser == "NelderMead")                                \
      optimWithConstraintNM(init);                                      \
    else if (this->optimiser == "Nlm")                                  \
      optimWithConstraintNlm(init);                                     \
    else                                                                \
      optimWithConstraintBFGS(init);                                    \
  }

  // struct for data
  struct BaseData {
    mat X, XD, X0, X1;
    vec bhazard,wt,wt0,event,time,offset;
    vec map0;
    uvec ind0, which0;
  };

  // Main class for Stpm2 (fully parametric)
  class Stpm2 : public BaseData {
  public:
    typedef Stpm2 This;
    Stpm2(SEXP sexp) : bfgs() {
      List list = as<List>(sexp);
      std::string linkType = as<std::string>(list["link"]);
      if (linkType=="PH") link=new LogLogLink(sexp);
      else if (linkType=="PO") link=new LogitLink(sexp);
      else if (linkType=="probit") link=new ProbitLink(sexp);
      else if (linkType=="AH") link=new LogLink(sexp);
      else if (linkType=="AO") link=new ArandaOrdazLink(sexp);
      else {
        REprintf("No matching link.type\n");
        return;
      }
      bfgs.coef = init = as<NumericVector>(list["init"]);
      X = as<mat>(list["X"]);
      XD = as<mat>(list["XD"]);
      bhazard = as<vec>(list["bhazard"]);
      wt = as<vec>(list["wt"]);
      event = as<vec>(list["event"]);
      time = as<vec>(list["time"]);
      offset = as<vec>(list["offset"]);
      delayed = as<bool>(list["delayed"]);
      interval = as<bool>(list["interval"]);
      n = nbeta = init.size(); // number of parameters
      N = X.n_rows; // number of observations
      X1 = as<mat>(list["X1"]);
      X0 = as<mat>(list["X0"]);
      wt0.set_size(N); wt0.fill(0.0);
      if (delayed) {
        wt0 = as<vec>(list["wt0"]);
      }
      // if (interval) {
      //   X1 = as<mat>(list["X1"]);
      // }
      map0 = as<vec>(list["map0"]); // length N map for X->X0
      full_which0 = as<vec>(list["which0"]); // length N map for X0->X
      ind0 = as<uvec>(list["ind0"]); // length N boolean for X0
      ttype = as<vec>(list["ttype"]);
      kappa_init = kappa = as<double>(list["kappa"]);
      maxkappa = as<double>(list["maxkappa"]);
      optimiser = as<std::string>(list["optimiser"]);
      bfgs.trace = as<int>(list["trace"]);
      reltol = bfgs.reltol = as<double>(list["reltol"]);
      robust_initial = as<bool>(list["robust_initial"]);
      bfgs.hessianp = false;
      bfgs.parscale = parscale = as<vec>(list["parscale"]);
      which0 = removeNaN(full_which0);
    }
    // log-likelihood components and constraints
    // Note: the li and gradli components depend on other variables
    // (bhazard, wt, wt0, event); calculations should *not* be done on subsets
    virtual ~Stpm2() {
      delete link;
    }
    li_constraint li_right_censored(vec eta, vec etaD) {
      vec h = link->h(eta,etaD) + bhazard;
      vec H = link->H(eta);
      vec eps = h*0.0 + 1.0e-16;
      double constraint = kappa/2.0 * (sum(h % h % (h<0)) + sum(H % H % (H<0)));
      h = max(h,eps);
      H = max(H,eps);
      vec li = wt % event % log(h) - wt % H;
      li_constraint out = {li, constraint};
      return out;
    }
    li_constraint li_left_truncated(vec eta0) {
      vec H0 = link->H(eta0);
      double constraint = kappa/2.0 * sum(H0 % H0 % (H0<0));
      vec eps = H0*0.0 + 1.0e-16;
      vec li = wt0 % max(H0,eps);
      li_constraint out = {li, constraint};
      return out;
    }
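    // Likelihood components (sketch): for a right-censored observation with
    // weight w and event indicator d, the log-likelihood contribution is
    //   li = w*d*log(h(t)) - w*H(t),
    // and a delayed (left-truncated) entry at t0 adds w0*H(t0) back in (see
    // li() below). The kappa terms penalise negative hazards rather than
    // rejecting them outright, so the optimiser sees a smooth objective.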
    li_constraint li_interval(vec eta, vec etaD, vec eta1) {
      vec H = link->H(eta);
      vec H1 = link->H(eta1);
      vec h = link->h(eta, etaD) + bhazard;
      double constraint = 0.0;
      vec eps = H*0.0 + 1e-16;
      H = max(H,eps);
      H1 = max(H1,eps);
      h = max(h,eps);
      vec li(N,fill::zeros);
      uvec index;
      index = find(ttype == 0); // right censored
      if (any(index)) {
        li(index) -= wt(index) % H(index);
        constraint += kappa/2.0 * sum(H(index) % H(index) % (H(index)<0));
      }
      index = find(ttype == 1); // exact
      if (any(index)) {
        li(index) += wt(index) % (log(h(index)) - H(index));
        constraint += kappa/2.0 * (sum(H(index) % H(index) % (H(index)<0)) +
                                   sum(h(index) % h(index) % (h(index)<0)));
      }
      index = find(ttype == 2); // left censored
      if (any(index)) {
        li(index) += wt(index) % log(1-exp(-H(index)));
        constraint += kappa/2.0 * sum(H(index) % H(index) % (H(index)<0));
      }
      index = find(ttype == 3); // interval censored
      if (any(index)) {
        li(index) += wt(index) % log(exp(-H(index)) - exp(-H1(index)));
        constraint += kappa/2.0 * (sum(H(index) % H(index) % (H(index)<0)) +
                                   sum(H1(index) % H1(index) % (H1(index)<0)));
      }
      li_constraint out = {li, constraint};
      return out;
    }
    li_constraint li(vec eta, vec etaD, vec eta0, vec eta1, vec beta) {
      if (interval) {
        return li_interval(eta+offset, etaD, eta1+offset);
      }
      else {
        li_constraint s = li_right_censored(eta+offset, etaD);
        if (delayed && !eta0.empty()) {
          li_constraint s0 = li_left_truncated(eta0+offset(which0));
          s.constraint += s0.constraint;
          if (bfgs.trace > 0) {
            Rprint(which0);
          }
          s.li(which0) += s0.li;
        }
        return s;
      }
    }
    vec getli(vec beta) {
      vec vbeta = beta;
      vbeta.resize(nbeta);
      li_constraint lic = li(X*vbeta, XD*vbeta, X0*vbeta, X1*vbeta, beta);
      return lic.li;
    }
    mat getgradli(vec beta) {
      vec vbeta = beta;
      vbeta.resize(nbeta);
      gradli_constraint gradlic = gradli(X*vbeta, XD*vbeta, X0*vbeta, X1*vbeta,
                                         X, XD, X0, X1, beta);
      return gradlic.gradli;
    }
    // negative log-likelihood
    double objective(vec beta) {
      vec vbeta = beta;
      vbeta.resize(nbeta);
      li_constraint s = li(X * vbeta, XD * vbeta, X0 * vbeta, X1 * vbeta, beta);
      return -sum(s.li) + s.constraint;
    }
    // finite-differencing of the gradient for the objective
    vec fdgradient(vec beta, double eps = 1.0e-8) {
      int n = beta.size();
      vec grad(n);
      // central differences of the objective (loop reconstructed)
      for (int i=0; i<n; ++i) {
        vec lower(beta);
        vec upper(beta);
        lower(i) -= eps;
        upper(i) += eps;
        grad(i) = (objective(upper) - objective(lower)) / 2.0 / eps;
      }
      return grad;
    }
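    // Gradient components (sketch): differentiating li = w*d*log(h) - w*H
    // with respect to beta gives
    //   d li / d beta = w*d*gradh/h - w*gradH,
    // which is what gradli_right_censored assembles below; the constraint
    // columns collect kappa*h*gradh and kappa*H*gradH on the negative parts,
    // matching the quadratic penalty in the li_* functions.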
    gradli_constraint gradli_right_censored(vec eta, vec etaD, mat X, mat XD) {
      vec h = link->h(eta,etaD) + bhazard;
      vec H = link->H(eta);
      vec eps = h*0.0 + 1.0e-16;
      mat gradH = link->gradH(eta,X);
      mat gradh = link->gradh(eta,etaD,X,XD);
      mat Xconstraint = kappa * (rmult(gradh, h % (h<0)) + rmult(gradH, H % (H<0)));
      // h = max(h,eps);
      mat Xgrad = -rmult(gradH, wt) + rmult(gradh, event / h % wt);
      gradli_constraint out = {Xgrad, Xconstraint};
      return out;
    }
    gradli_constraint gradli_left_truncated(vec eta0, mat X0) {
      mat gradH0 = link->gradH(eta0, X0);
      vec H0 = link->H(eta0);
      vec eps = H0*0.0 + 1.0e-16;
      mat Xconstraint = kappa * rmult(gradH0, H0 % (H0<0));
      mat Xgrad = rmult(gradH0, wt0);
      gradli_constraint out = {Xgrad, Xconstraint};
      return out;
    }
    gradli_constraint gradli_interval_censored(vec eta, vec etaD, vec eta1,
                                               mat X, mat XD, mat X1) {
      vec H = link->H(eta);
      vec h = link->h(eta,etaD);
      vec H1 = link->H(eta1);
      mat gradH = link->gradH(eta,X);
      mat gradH1 = link->gradH(eta1,X1);
      mat gradh = link->gradh(eta, etaD, X, XD);
      vec eps = H*0.0 + 1e-16;
      /* ... the assembly of the gradient rows by ttype (right/exact/left/
         interval censoring), analogous to li_interval(), elided ... */
    }
    // (reconstructed to mirror li())
    gradli_constraint gradli(vec eta, vec etaD, vec eta0, vec eta1,
                             mat X, mat XD, mat X0, mat X1, vec beta) {
      if (interval) {
        return gradli_interval_censored(eta+offset, etaD, eta1+offset, X, XD, X1);
      } else {
        gradli_constraint s = gradli_right_censored(eta+offset, etaD, X, XD);
        if (delayed && !eta0.empty()) {
          gradli_constraint s0 = gradli_left_truncated(eta0+offset(which0), X0);
          s.constraint.rows(which0) += s0.constraint;
          s.gradli.rows(which0) += s0.gradli;
        }
        return s;
      }
    }
    // (reconstructed to mirror the frailty classes)
    vec gradient(vec beta) {
      vec vbeta = beta;
      vbeta.resize(nbeta);
      gradli_constraint gc = gradli(X*vbeta, XD*vbeta, X0*vbeta, X1*vbeta,
                                    X, XD, X0, X1, beta);
      rowvec dconstraint = sum(gc.constraint,0);
      rowvec vgr = sum(gc.gradli,0);
      vec gr(beta.size());
      for (size_t i = 0; i<beta.size(); ++i)
        gr[i] = -vgr[i] + dconstraint[i];
      return gr;
    }
    // is the proposed parameter vector feasible (non-negative hazards)?
    bool feasible(vec beta) {
      vec eta = X * beta + offset;
      vec etaD = XD * beta;
      vec h = link->h(eta, etaD) + bhazard;
      vec H = link->H(eta);
      bool condition = all((h>0) % (H>0));
      if (delayed) {
        vec eta0 = X0 * beta;
        // vec etaD0 = XD0 * beta;
        vec H0 = link->H(eta0);
        condition = condition && all(H0>0);
      }
      return condition;
    }
    // rescale the parameters in and out of the optimiser (reconstructed)
    void pre_process() {
      for (int i = 0; i<n; ++i) init[i] /= parscale[i];
    }
    void post_process() {
      for (int i = 0; i<n; ++i) {
        bfgs.coef[i] *= parscale[i];
        init[i] *= parscale[i];
      }
    }
    // NumericMatrix calcHessian(NumericVector nvbeta) {
    //   vec beta(&nvbeta[0],n);
    //   vec eta = X * beta;
    //   vec etaD = XD * beta;
    //   vec h = link->h(eta,etaD);
    //   mat gradh = link->gradh(eta,etaD,X,XD);
    //   cube hessianh = link->hessianh(beta,X,XD);
    //   cube hessianH = link->hessianH(beta,X);
    //   cube hessianH0 = link->hessianH(beta,X0);
    //   mat hessian = -arma::sum(hessianH,2);
    //   if (delayed)
    //     hessian += arma::sum(hessianH0,2);
    //   for (int i=0; i<N; ++i)
    //     ... (event terms elided) ...
    // }
    uvec map0f(uvec index) {
      return conv_to<uvec>::from(this->map0(index));
    }
    uvec which0f(uvec index) {
      vec which = this->full_which0(index);
      return find(which == which);
    }
    uvec removeNaN(vec x) {
      vec newx = x;
      uvec index = find(newx == newx); // NaN != NaN
      newx = newx(index);
      return conv_to<uvec>::from(newx);
    }
    SEXP return_modes() { return wrap(-1); } // used in NormalSharedFrailties
    SEXP return_variances() { return wrap(-1); } // used in NormalSharedFrailties
    NumericVector init;
    vec parscale, ttype, full_which0;
    double kappa_init, kappa, maxkappa, reltol;
    bool delayed, interval, robust_initial;
    int n, N, nbeta;
    BFGS2 bfgs;
    std::string optimiser;
    Link * link;
  };

  // penalised smoothers
  // _Not_ sub-classes of Pstpm2, hence the longer function signatures
  class SmoothLogH {
  public:
    struct Smoother {
      int first_para, last_para;
      mat S;
      bool first;
    };
    SmoothLogH(SEXP sexp) {
      List list = as<List>(sexp);
      List lsmooth = as<List>(list["smooth"]);
      for(int i=0; i<lsmooth.size(); ++i) {
        List lsmoothi = as<List>(lsmooth[i]);
        List lsmoothiS = as<List>(lsmoothi["S"]);
        Smoother smoothi = {
          as<int>(lsmoothi["first.para"]) - 1,
          as<int>(lsmoothi["last.para"]) - 1,
          as<mat>(lsmoothiS[0]),
          true
        };
        smooth.push_back(smoothi);
        if (lsmoothiS.size()==2) {
          Smoother smoothi2 = {
            as<int>(lsmoothi["first.para"]) - 1,
            as<int>(lsmoothi["last.para"]) - 1,
            as<mat>(lsmoothiS[1]),
            false
          };
          smooth.push_back(smoothi2);
        }
      }
    }
    double penalty(vec vbeta, vec sp) {
      double value = 0.0;
      for (size_t i=0; i < smooth.size(); ++i) {
        Smoother smoothi = smooth[i];
        value += sp[i]/2 *
          dot(vbeta.subvec(smoothi.first_para,smoothi.last_para),
              smoothi.S * vbeta.subvec(smoothi.first_para,smoothi.last_para));
      }
      return value;
    }
    vec penalty_gradient(vec vbeta, vec sp) {
      int n = vbeta.size();
      rowvec vgr(n, fill::zeros);
      for (size_t i=0; i < smooth.size(); ++i) {
        Smoother smoothi = smooth[i];
        vgr.subvec(smoothi.first_para,smoothi.last_para) +=
          sp[i] * (smoothi.S * vbeta.subvec(smoothi.first_para,smoothi.last_para)).t();
      }
      vec gr(n);
      for (int i=0; i<n; ++i) gr[i] = vgr[i];
      return gr;
    }
    vec traces(mat X) {
      vec values(smooth.size(), fill::zeros);
      for (size_t i=0; i < smooth.size(); ++i) {
        Smoother smoothi = smooth[i];
        for (int j=smoothi.first_para; j<=smoothi.last_para; ++j)
          values[i] += X(j,j);
        // if (!smoothi.first) values[i]=0.0; // edf for second smoother matrix: ignore
      }
      return values;
    }
    std::vector<Smoother> smooth;
  };
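  // Penalty (sketch): for smoothing parameters sp and penalty matrices S_i
  // acting on coefficient blocks beta_i, SmoothLogH contributes
  //   penalty(beta, sp) = sum_i sp[i]/2 * beta_i' S_i beta_i
  // with gradient sum_i sp[i] * S_i beta_i, which is what penalty() and
  // penalty_gradient() above compute blockwise via subvec().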
  // // TODO: include first_para and last_para class data for the traces() method
  // class SmoothHaz {
  // public:
  //   struct Smoother {
  //     mat X0, X1, X2, X3;
  //     vec w;
  //     double lambda;
  //   };
  //   SmoothHaz(SEXP sexp) {
  //     List list = as<List>(sexp);
  //     List lsmooth = as<List>(list["smooth"]);
  //     for(int i=0; i<lsmooth.size(); ++i) {
  //       List lsmoothi = as<List>(lsmooth[i]);
  //       Smoother smoothi = {
  //         as<mat>(lsmoothi["X0"]),
  //         as<mat>(lsmoothi["X1"]),
  //         as<mat>(lsmoothi["X2"]),
  //         as<mat>(lsmoothi["X3"]),
  //         as<vec>(lsmoothi["w"]),
  //         as<double>(lsmoothi["lambda"])
  //       };
  //       smooth.push_back(smoothi);
  //     }
  //   }
  //   double penalty(vec vbeta, vec sp) {
  //     double value = 0.0;
  //     for (size_t i=0; i < smooth.size(); ++i) {
  //       Smoother obj = smooth[i];
  //       vec s0 = obj.X0 * vbeta;
  //       vec s1 = obj.X1 * vbeta;
  //       vec s2 = obj.X2 * vbeta;
  //       vec s3 = obj.X3 * vbeta;
  //       vec h2 = exp(s0) % (s3 + 3*s1%s2 + s1%s1%s1);
  //       value += sp[i]/2 * obj.lambda * sum(obj.w % h2 % h2);
  //     }
  //     return value;
  //   }
  //   vec penalty_gradient(vec vbeta, vec sp) {
  //     int n = vbeta.size();
  //     rowvec vgr(n, fill::zeros);
  //     for (size_t i=0; i < smooth.size(); ++i) {
  //       Smoother obj = smooth[i];
  //       vec s0 = obj.X0 * vbeta;
  //       vec s1 = obj.X1 * vbeta;
  //       vec s2 = obj.X2 * vbeta;
  //       vec s3 = obj.X3 * vbeta;
  //       vec h2 = exp(s0) % (s3 + 3*s1%s2 + s1%s1%s1);
  //       mat dh2sq_dbeta =
  //         2*lmult(h2 % exp(s0),
  //                 obj.X3+3*(rmult(obj.X1,s2)+rmult(obj.X2,s1))+3*lmult(s1%s1,obj.X1))+
  //         2*lmult(h2 % h2,obj.X0);
  //       vgr += sp[i]*obj.lambda*sum(lmult(obj.w, dh2sq_dbeta),0);
  //     }
  //     vec gr(n);
  //     for (int i = 0; i<n; ++i) gr[i] = vgr[i];
  //     return gr;
  //   }
  //   std::vector<Smoother> smooth;
  // };
  template<class T>
  double pstpm2_multivariate_step(int n, double * logsp_ptr, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    vec logsp(logsp_ptr,n);
    R_CheckUserInterrupt();  /* be polite -- did the user hit ctrl-C? */
    return model->multivariate_step(logsp);
  }
  template<class T>
  double pstpm2_first_step(double logsp, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    R_CheckUserInterrupt();  /* be polite -- did the user hit ctrl-C? */
    return model->first_step(logsp);
  }
  template<class T>
  void pstpm2_multivariate_stepNlm(int n, double * logsp_ptr, double * objective, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    vec logsp(logsp_ptr,n);
    R_CheckUserInterrupt();  /* be polite -- did the user hit ctrl-C? */
    *objective = model->multivariate_step(logsp);
  }
  // Penalised link-based models
  template<class Stpm2Type, class Smooth>
  class Pstpm2 : public Stpm2Type, public Smooth {
  public:
    typedef Pstpm2<Stpm2Type,Smooth> This;
    Pstpm2(SEXP sexp) : Stpm2Type(sexp), Smooth(sexp) {
      List list = as<List>(sexp);
      sp = as<vec>(list["sp"]);
      reltol_search = as<double>(list["reltol_search"]);
      reltol_outer = as<double>(list["reltol_outer"]);
      alpha = as<double>(list["alpha"]);
      criterion = as<int>(list["criterion"]);
      outer_optim = as<int>(list["outer_optim"]);
    }
    double objective(vec beta) {
      return Stpm2Type::objective(beta) + Smooth::penalty(beta,sp);
    }
    double objective0(vec beta) {
      return Stpm2Type::objective(beta);
    }
    vec gradient(vec beta) {
      return Stpm2Type::gradient(beta) + Smooth::penalty_gradient(beta,sp);
    }
    vec gradient0(vec beta) {
      return Stpm2Type::gradient(beta);
    }
    OPTIM_FUNCTIONS;
    double calc_edf(NumericMatrix hessian0) {
      double max_edf = this->bfgs.hessian.ncol();
      mat m;
      bool flag = arma::solve(m, as<mat>(this->bfgs.hessian), as<mat>(hessian0));
      double edf = flag ? arma::trace(m) : (max_edf*2.0);
      if (edf<0.0) edf = max_edf*2.0;
      return edf;
    }
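    // Effective degrees of freedom (sketch): with H the penalised Hessian
    // and H0 the unpenalised Hessian at the fit, edf = trace(H^{-1} H0);
    // first_step() then scores a candidate smoothing parameter by
    //   GCV-like criterion: negll + alpha*edf, or
    //   BIC-like criterion: negll + alpha*edf*log(number of events),
    // and the outer optimiser searches over log(sp).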
    double first_step(double logsp) {
      sp[0] = exp(logsp);
      this->pre_process();
      this->optimWithConstraint(this->init);
      // this->bfgs.hessian = this->bfgs.calc_hessian(&optimgradient<This>, (void *) this);
      // unpenalised Hessian (template argument reconstructed)
      NumericMatrix hessian0 = this->bfgs.calc_hessian(&optimgradient<Stpm2Type>, (void *) this);
      if (this->bfgs.trace > 1) {
        Rprintf("Debug on trace calculation. Coef:\n");
        Rprint(this->bfgs.coef);
        if (this->bfgs.trace > 1) {
          Rprintf("Hessian0:\n"); Rprint(hessian0);
          Rprintf("Hessian:\n"); Rprint(this->bfgs.hessian);
        }
      }
      double edf = calc_edf(hessian0);
      double negll = this->bfgs.calc_objective(&optimfunction<This>, (void *) this);
      double gcv = negll + alpha*edf;
      double bic = negll + alpha*edf*log(sum(this->event));
      this->init = this->bfgs.coef;
      if (this->bfgs.trace > 0)
        Rprintf("sp=%f\tedf=%f\tnegll=%f\tgcv=%f\tbic=%f\talpha=%f\n",
                sp[0],edf,negll,gcv,bic,alpha);
      this->post_process();
      return criterion==1 ? gcv : bic;
    }
    double multivariate_step(vec logsp) {
      this->pre_process();
      double lsp = -9.0, usp = 9.0;
      for (size_t i=0; i < sp.size(); ++i)
        sp[i] = exp(bound(logsp[i],lsp,usp));
      if (this->bfgs.trace > 0) {
        for (size_t i = 0; i < sp.size(); ++i)
          Rprintf("sp[%i]=%f\t",i,sp[i]);
      }
      this->optimWithConstraint(this->init);
      this->bfgs.hessian = this->bfgs.calc_hessian(&optimgradient<This>, (void *) this);
      // unpenalised Hessian (template argument reconstructed)
      NumericMatrix hessian0 = this->bfgs.calc_hessian(&optimgradient<Stpm2Type>, (void *) this);
      double edf = calc_edf(hessian0);
      double negll = this->bfgs.calc_objective(&optimfunction<This>, (void *) this);
      double gcv = negll + alpha*edf;
      double bic = 2.0*negll + alpha*edf*log(sum(this->event));
      this->init = this->bfgs.coef;
      // simple boundary constraints
      double constraint = 0.0;
      // double kappa = 1.0;
      for (size_t i=0; i < sp.size(); ++i) {
        if (logsp[i] < lsp) constraint += pow(logsp[i]-lsp,2.0);
        if (logsp[i] > usp) constraint += pow(logsp[i]-usp,2.0);
      }
      double objective = criterion==1 ?
        gcv + this->kappa/2.0*constraint :
        bic + this->kappa/2.0*constraint;
      if (this->bfgs.trace > 0) {
        Rprintf("edf=%f\tnegll=%f\tgcv=%f\tbic=%f\tobjective=%f\n",
                edf,negll,gcv,bic,objective);
      }
      this->post_process();
      return objective;
    }
    SEXP optim_fixed() {
      this->pre_process();
      this->optimWithConstraint(this->init);
      this->bfgs.hessian = this->bfgs.calc_hessian(&optimgradient<This>, (void *) this);
      NumericMatrix hessian0 = this->bfgs.calc_hessian(&optimgradient<Stpm2Type>, (void *) this);
      mat Proj = solve(as<mat>(wrap(this->bfgs.hessian)),as<mat>(wrap(hessian0)));
      double edf = trace(Proj);
      NumericVector edf_var = as<NumericVector>(wrap(Smooth::traces(Proj)));
      this->post_process();
      return List::create(_("sp")=wrap(sp),
                          _("coef")=wrap(this->bfgs.coef),
                          _("hessian")=wrap(this->bfgs.hessian),
                          _("edf")=wrap(edf),
                          _("edf_var")=wrap(edf_var),
                          _("kappa")=wrap(this->kappa));
    }
    SEXP optim_first() {
      this->bfgs.reltol = reltol_search;
      double opt_sp = exp(Brent_fmin(log(0.001),log(1000.0),
                                     &(pstpm2_first_step<This>),
                                     (void *) this,reltol_outer));
      sp[0] = opt_sp;
      this->bfgs.reltol = this->reltol;
      return optim_fixed();
    }
    SEXP optim_multivariate() {
      if (outer_optim==1) return optim_multivariate_NelderMead();
      else return optim_multivariate_Nlm();
    }
    SEXP optim_multivariate_NelderMead() {
      this->kappa = 10.0;
      NelderMead2 nm;
      nm.reltol = reltol_outer;
      nm.maxit = 500;
      nm.hessianp = false;
      nm.parscale = this->parscale;
      this->bfgs.reltol = reltol_search;
      NumericVector logsp(sp.size());
      for (size_t i=0; i < sp.size(); ++i)
        logsp[i] = log(sp[i]);
      bool satisfied;
      do {
        nm.optim(&pstpm2_multivariate_step<This>, logsp, (void *) this);
        satisfied = true;
        for (size_t i=0; i < sp.size(); ++i)
          if (logsp[i]< -9.0 || logsp[i] > 9.0) satisfied = false;
        if (!satisfied) this->kappa *= 2.0;
      } while (!satisfied && this->kappa<1.0e5);
      for (int i=0; i < nm.coef.size(); ++i)
        sp[i] = exp(nm.coef[i]);
      this->bfgs.coef = this->init;
      this->bfgs.reltol = this->reltol;
      return optim_fixed();
    }
    SEXP optim_multivariate_Nlm() {
      this->kappa = this->kappa_init;
      Nlm2 nm;
      nm.gradtl = nm.steptl = reltol_outer;
      nm.itnlim = 100;
      // nm.hessianp = false;
      nm.parscale = this->parscale;
      this->bfgs.reltol = reltol_search;
      NumericVector logsp(sp.size());
      for (size_t i=0; i < sp.size(); ++i)
        logsp[i] = log(sp[i]);
      bool satisfied;
      do {
        nm.optim(&pstpm2_multivariate_stepNlm<This>, logsp, (void *) this);
        satisfied = true;
        for (size_t i=0; i < sp.size(); ++i)
          if (logsp[i]< -9.0 || logsp[i] > 9.0) satisfied = false;
        if (!satisfied) this->kappa *= 2.0;
      } while (!satisfied && this->kappa<1.0e5);
      for (int i=0; i < nm.coef.size(); ++i)
        sp[i] = exp(nm.coef[i]);
      this->bfgs.coef = this->init;
      this->bfgs.reltol = this->reltol;
      return optim_fixed();
    }
    vec sp;
    double alpha, reltol_search, reltol_outer;
    int criterion, outer_optim;
  };
  // template<class T>
  // void pstpm2_step_multivariate_nlm(int n, double * logsp, double * f, void *ex) {
  //   *f = pstpm2_step_multivariate<T>(n, logsp, ex);
  // };

  /** Extension of stpm2 and pstpm2 to include gamma shared frailties with a cluster variable **/
  template<class Base>
  class GammaSharedFrailty : public Base {
  public:
    typedef std::vector<int> Index;
    typedef std::map<int,Index> IndexMap;
    typedef GammaSharedFrailty<Base> This;
    GammaSharedFrailty(SEXP sexp) : Base(sexp) {
      List list = as<List>(sexp);
      ivec cluster = as<ivec>(list["cluster"]);
      recurrent = as<bool>(list["recurrent"]);
      this->nbeta = this->n - 1;
      // ragged array indexed by a map of vectors
      for (size_t id=0; id<cluster.size(); ++id) {
        clusters[cluster[id]].push_back(id);
        if (this->delayed && this->ind0[id])
          cluster_delayed[cluster[id]].push_back(id);
      }
    }
    /** Objective function for a gamma shared frailty.
        Assumes that weights == 1. **/
    double objective(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta); // copy across to this->bfgs.coef?
      li_constraint s = li(this->X * vbeta, this->XD * vbeta,
                           this->X0 * vbeta, this->X1 * vbeta, beta);
      return -sum(s.li) + s.constraint;
    }
    vec getli(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      li_constraint lic = li(this->X*vbeta, this->XD*vbeta,
                             this->X0*vbeta, this->X1*vbeta, beta);
      return lic.li;
    }
    mat getgradli(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      gradli_constraint gradlic = gradli(this->X*vbeta, this->XD*vbeta,
                                         this->X0*vbeta, this->X1*vbeta,
                                         this->X, this->XD, this->X0, this->X1, beta);
      return gradlic.gradli;
    }
    li_constraint li(vec eta, vec etaD, vec eta0, vec eta1, vec beta) {
      vec ll(clusters.size(), fill::zeros);
      int n = beta.size();
      vec vbeta(beta); // logtheta is the last parameter in beta
      vbeta.resize(this->nbeta);
      double theta = exp(beta[n-1]);
      eta = this->X * vbeta + this->offset;
      etaD = this->XD * vbeta;
      vec h = this->link->h(eta,etaD) + this->bhazard;
      vec H = this->link->H(eta);
      double constraint = this->kappa/2.0 * (sum(h % h % (h<0)) + sum(H % H % (H<0)));
      vec eps = h*0.0 + 1.0e-16;
      h = max(h,eps);
      H = max(H,eps);
      vec H0;
      if (this->delayed) {
        eta0 = this->X0 * vbeta + this->offset(this->which0);
        H0 = this->link->H(eta0);
        constraint += this->kappa/2.0 * sum(H0 % H0 % (H0<0));
      }
      int i=0;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it, ++i) {
        uvec index = conv_to<uvec>::from(it->second);
        int mi = sum(this->event(index));
        double sumH = sum(H(index));
        double sumHenter = 0.0;
        ll(i) += sum(log(h(index)) % this->event(index)); // shared
        if (this->delayed && !cluster_delayed[it->first].empty()) {
          uvec ind00 = conv_to<uvec>::from(cluster_delayed[it->first]);
          sumHenter = sum(H0(Base::map0f(ind00)));
        }
        if (recurrent) { // for calendar timescale
          ll(i) += -(1.0/theta+mi)*log(1.0+theta*(sumH - sumHenter)); // Rondeau et al (2005)
        } else { // for clustered data or gap timescale
          ll(i) += -(1.0/theta+mi)*log(1.0+theta*sumH) +
            1.0/theta*log(1.0+theta*sumHenter); // Rondeau et al
        }
        // Note: log(gamma(1/theta+mi)/gamma(1/theta)*theta^mi)
        //     = sum_{k=1}^{mi} log(1+theta*(mi-k)) for mi>0; 0 otherwise
        if (mi>0) {
          for (int k=1; k<=mi; ++k)
            ll(i) += log(1.0+theta*(mi-k));
        }
      }
      li_constraint out;
      out.constraint = constraint;
      out.li = ll;
      return out;
    }
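    // Marginal likelihood (sketch): integrating a gamma frailty u with mean 1
    // and variance theta out of prod_j h(t_ij)^d_ij * exp(-u*H_i.) gives, per
    // cluster with mi events and summed cumulative hazard sumH,
    //   ll_i = sum_j d_ij*log h(t_ij)
    //        - (1/theta + mi)*log(1 + theta*sumH)
    //        + sum_{k=1}^{mi} log(1 + theta*(mi-k)),
    // with the delayed-entry (sumHenter) adjustments of Rondeau et al above.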
    vec gradient(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      gradli_constraint gc = gradli(this->X*vbeta, this->XD*vbeta,
                                    this->X0*vbeta, this->X1*vbeta,
                                    this->X, this->XD, this->X0, this->X1, beta);
      rowvec dconstraint = sum(gc.constraint,0);
      rowvec vgr = sum(gc.gradli,0);
      vec gr(beta.size());
      for (size_t i = 0; i<beta.size(); ++i)
        gr[i] = -vgr[i] + dconstraint[i];
      return gr;
    }
    gradli_constraint gradli(vec eta, vec etaD, vec eta0, vec eta1,
                             mat X, mat XD, mat X0, mat X1, vec beta) {
      int n = beta.size();
      mat gr = zeros<mat>(clusters.size(), n);
      mat grconstraint = zeros<mat>(clusters.size(), n);
      vec vbeta(beta); // theta is the last parameter in beta
      vbeta.resize(n-1);
      double theta = exp(beta[n-1]);
      // eta = this->X * vbeta;
      // etaD = this->XD * vbeta;
      vec h = this->link->h(eta,etaD);
      vec H = this->link->H(eta);
      mat gradh = this->link->gradh(eta,etaD,this->X,this->XD);
      mat gradH = this->link->gradH(eta,this->X);
      vec eps = h*0.0 + 1.0e-16;
      h = max(h,eps);
      H = max(H,eps);
      vec H0;
      mat gradH0;
      if (this->delayed) {
        // vec eta0 = this->X0 * vbeta;
        // vec etaD0 = this->XD0 * vbeta;
        H0 = this->link->H(eta0);
        gradH0 = this->link->gradH(eta0,this->X0);
      }
      int i=0;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it, ++i) {
        int mi=0;
        double sumH = 0.0, sumHenter = 0.0;
        vec gradi = zeros<vec>(n-1);
        vec gradHi = zeros<vec>(n-1);
        vec gradH0i = zeros<vec>(n-1);
        // vec grconstraint = zeros<vec>(n-1);
        for (Index::iterator j=it->second.begin(); j!=it->second.end(); ++j) {
          sumH += H(*j);
          gradHi += gradH.row(*j).t();
          if (this->event(*j)==1) {
            gradi += gradh.row(*j).t() / h(*j);
            mi++;
          }
          if (this->delayed && this->ind0[*j]) {
            int jj = this->map0[*j];
            sumHenter += H0(jj);
            gradH0i += gradH0.row(jj).t();
            grconstraint(i,span(0,n-2)) += this->kappa *
              (gradH0.row(jj) * H0(jj) * (H0(jj)<0));
          }
          grconstraint(i,span(0,n-2)) += this->kappa *
            ((gradh.row(*j) * h(*j) * (h(*j)<0)) + (gradH.row(*j) * H(*j) * (H(*j)<0)));
        }
        // (reconstructed) gradient with respect to beta and log(theta)
        if (recurrent) {
          double denom = 1.0+theta*(sumH-sumHenter);
          gr(i,span(0,n-2)) += (gradi - (1.0+mi*theta)*(gradHi-gradH0i)/denom).t();
          gr(i,n-1) += log(denom)/theta - (1.0+mi*theta)*(sumH-sumHenter)/denom;
        } else {
          double denom = 1.0+theta*sumH, denom0 = 1.0+theta*sumHenter;
          gr(i,span(0,n-2)) += (gradi - (1.0+mi*theta)*gradHi/denom + gradH0i/denom0).t();
          gr(i,n-1) += log(denom)/theta - (1.0+mi*theta)*sumH/denom
            - log(denom0)/theta + sumHenter/denom0;
        }
        if (mi>0) {
          for (int k=1; k<=mi; ++k)
            // axiom: D(log(1+exp(btheta)*(mi-k)),btheta)
            gr(i,n-1) += theta*(mi-k)/(1.0+theta*(mi-k));
        }
      }
      gradli_constraint grli = {gr, grconstraint};
      return grli;
    }
    bool feasible(vec beta) {
      vec coef(beta);
      coef.resize(this->n-1);
      return Base::feasible(coef);
    }
    OPTIM_FUNCTIONS;
    IndexMap clusters, cluster_delayed;
    bool recurrent;
  };

  /** Extension of stpm2 and pstpm2 to include a Clayton copula **/
  template<class Base>
  class ClaytonCopula : public Base {
  public:
    typedef std::vector<int> Index;
    typedef std::map<int,Index> IndexMap;
    typedef ClaytonCopula<Base> This;
    ClaytonCopula(SEXP sexp) : Base(sexp) {
      List list = as<List>(sexp);
      ivec cluster = as<ivec>(list["cluster"]);
      this->nbeta = this->n - 1;
      // ragged array indexed by a map of vectors
      for (size_t id=0; id<cluster.size(); ++id)
        clusters[cluster[id]].push_back(id);
    }
    double objective(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta); // copy across to this->bfgs.coef?
      li_constraint s = li(this->X * vbeta, this->XD * vbeta,
                           this->X0 * vbeta, this->X1 * vbeta, beta);
      return -sum(s.li) + s.constraint;
    }
    vec getli(vec beta) {
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      li_constraint lic = li(this->X*vbeta, this->XD*vbeta,
                             this->X0*vbeta, this->X1*vbeta, beta);
      return lic.li;
    }
    mat getgradli(vec beta) { // WRONG - DO NOT USE!
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      gradli_constraint gradlic = gradli(this->X*vbeta, this->XD*vbeta,
                                         this->X0*vbeta, this->X1*vbeta,
                                         this->X, this->XD, this->X0, this->X1, beta);
      return gradlic.gradli;
    }
    li_constraint li(vec eta, vec etaD, vec eta0, vec eta1, vec beta) {
      vec ll(clusters.size(), fill::zeros);
      int n = beta.size();
      vec vbeta(beta); // logtheta is the last parameter in beta
      vbeta.resize(this->nbeta);
      double itheta = exp(-beta[n-1]); // NOTE: INVERSE OF THE VARIANCE
      eta = this->X * vbeta + this->offset;
      etaD = this->XD * vbeta;
      vec h = this->link->h(eta,etaD) + this->bhazard;
      vec H = this->link->H(eta);
      double constraint = this->kappa/2.0 * (sum(h % h % (h<0)) + sum(H % H % (H<0)));
      vec eps = h*0.0 + 1.0e-16;
      h = max(h,eps);
      H = max(H,eps);
      int i=0;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it, ++i) {
        uvec index = conv_to<uvec>::from(it->second);
        int ni = (it->second).size();
        int mi = sum(this->event(index));
        double logScopula = -itheta*log(sum(exp(H(index)/itheta)) - ni + 1);
        // Rprintf("ni=%i, mi=%i, log(Scopula)=%g\n",ni,mi,logScopula);
        if (mi==0) {
          ll(i) = logScopula;
        } else {
          ll(i) = logScopula/itheta*(itheta+mi);
          // Rprintf("ll(i)=%g\n",ll(i));
          ll(i) += sum((log(h(index))+H(index)/itheta) % this->event(index));
          for (int k=1; k<=mi; ++k)
            ll(i) += log(itheta+k-1.0) - log(itheta);
        }
      }
      li_constraint out;
      out.constraint = constraint;
      out.li = ll;
      return out;
    }
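    // Clayton copula (sketch): with marginal survival S_ij = exp(-H_ij) and
    // dependence parameter theta (itheta = 1/theta above), the joint survival
    // for cluster i with ni members is
    //   S_i = (sum_j S_ij^(-theta) - ni + 1)^(-1/theta),
    // and logScopula above is log S_i written in terms of H_ij/itheta, since
    // S_ij^(-theta) = exp(H_ij/itheta); event terms add the usual log-hazard
    // contributions plus copula derivatives, giving the log(itheta+k-1)
    // factors for mi events.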
    vec gradient(vec beta) { // WRONG - DO NOT USE!
      vec vbeta = beta;
      vbeta.resize(this->nbeta);
      gradli_constraint gc = gradli(this->X*vbeta, this->XD*vbeta,
                                    this->X0*vbeta, this->X1*vbeta,
                                    this->X, this->XD, this->X0, this->X1, beta);
      rowvec dconstraint = sum(gc.constraint,0);
      rowvec vgr = sum(gc.gradli,0);
      vec gr(beta.size());
      for (size_t i = 0; i<beta.size(); ++i)
        gr[i] = -vgr[i] + dconstraint[i];
      return gr;
    }
    gradli_constraint gradli(vec eta, vec etaD, vec eta0, vec eta1,
                             mat X, mat XD, mat X0, mat X1, vec beta) {
      int n = beta.size();
      mat gr = zeros<mat>(clusters.size(), n);
      mat grconstraint = zeros<mat>(clusters.size(), n);
      vec vbeta(beta); // theta is the last parameter in beta
      vbeta.resize(n-1);
      double itheta = exp(-beta[n-1]);
      // eta = this->X * vbeta;
      // etaD = this->XD * vbeta;
      vec h = this->link->h(eta,etaD);
      vec H = this->link->H(eta);
      mat gradh = this->link->gradh(eta,etaD,this->X,this->XD);
      mat gradH = this->link->gradH(eta,this->X);
      vec eps = h*0.0 + 1.0e-16;
      h = max(h,eps);
      H = max(H,eps);
      int i=0;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it, ++i) {
        uvec index = conv_to<uvec>::from(it->second);
        int ni = (it->second).size();
        int mi = sum(this->event(index));
        double Scopula0 = sum(exp(H(index)/itheta)) - ni + 1;
        vec gradEi = zeros<vec>(n-1);
        vec gradHexpHi = zeros<vec>(n-1);
        vec gradH0i = zeros<vec>(n-1);
        for (Index::iterator j=it->second.begin(); j!=it->second.end(); ++j) {
          gradHexpHi += gradH.row(*j).t() * exp(H(*j)/itheta) / itheta;
          if (this->event(*j)==1) {
            gradEi += gradH.row(*j).t() / itheta + gradh.row(*j)/h(*j);
          }
          grconstraint(i,span(0,n-2)) += this->kappa *
            ((gradh.row(*j) * h(*j) * (h(*j)<0)) + (gradH.row(*j) * H(*j) * (H(*j)<0)));
        }
        // (reconstructed) gradient with respect to beta
        for (int k=0; k<n-1; ++k) {
          gr(i,k) = (mi==0) ?
            -itheta*gradHexpHi(k)/Scopula0 :
            gradEi(k) - (itheta+mi)*gradHexpHi(k)/Scopula0;
        }
        /* ... derivative of logScopula with respect to log(theta) elided ... */
        if (mi>0) {
          for (int k=0; k<mi; ++k)
            // d/d(logtheta) of log(itheta+k) - log(itheta),
            // with d(itheta)/d(logtheta) = -itheta
            gr(i,n-1) += 1.0 - itheta/(itheta+k);
        }
      }
      gradli_constraint grli = {gr, grconstraint};
      return grli;
    }
    bool feasible(vec beta) {
      vec coef(beta);
      coef.resize(this->n-1);
      return Base::feasible(coef);
    }
    OPTIM_FUNCTIONS;
    IndexMap clusters;
  };

  /** @brief Wrappers for calling cluster-level objective and gradient methods **/
  template<class T>
  double call_objective_cluster(double bi, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    return model->objective_cluster(bi);
  }
  template<class T>
  double call_gradient_cluster(double bi, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    return model->gradient_cluster(bi);
  }
  template<class T>
  double call_objective_cluster_bfgs(int n, double *bi, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    return model->objective_cluster(*bi);
  }
  template<class T>
  void call_gradient_cluster_bfgs(int dim, double * bi, double* out, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    *out = model->gradient_cluster(*bi);
  }

  /** Extension of stpm2 and pstpm2 to include log-normal shared frailties with a cluster variable **/
  template<class Base>
  class NormalSharedFrailty : public Base {
  public:
    typedef std::vector<int> Index;
    typedef std::map<int,Index> IndexMap;
    typedef NormalSharedFrailty<Base> This;
    typedef std::map<int,double> Doubles;
    NormalSharedFrailty(SEXP sexp) : Base(sexp) {
      List list = as<List>(sexp);
      const BaseData fullTmp = {this->X, this->XD, this->X0, this->X1,
                                this->bhazard, this->wt, this->wt0, this->event,
                                this->time, this->offset, this->map0, this->ind0,
                                this->which0};
      full = fullTmp;
      IntegerVector cluster = as<IntegerVector>(list["cluster"]);
      full_Z = Z = as<vec>(list["Z"]);
      full_Z0 = Z0 = Z(this->ind0); // this could be size 0...
      gauss_x = as<vec>(list["gauss_x"]); // node values
      gauss_w = as<vec>(list["gauss_w"]); // probability weights
      adaptive = as<bool>(list["adaptive"]);
      recurrent = as<bool>(list["recurrent"]);
      first = true;
      // ragged array indexed by a map of vectors
      for (int id=0; id<cluster.size(); ++id)
        clusters[cluster[id]].push_back(id);
      this->nbeta = this->n - 1;
    }
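    // Gauss-Hermite quadrature (sketch): with b ~ N(0, sigma^2), nodes x_k
    // and weights w_k for integrating against exp(-x^2), substituting
    // b = sqrt(2)*sigma*x_k gives
    //   integral f(b) phi(b; 0, sigma^2) db
    //     ~= sum_k [w_k/sqrt(pi)] * f(sqrt(2)*sigma*x_k),
    // which is the wstar/bi construction used in objective_nonadaptive() below.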
    /** Objective function for a log-normal shared frailty.
        Assumes that weights == 1. **/
    double objective_nonadaptive(vec beta) {
      int n = beta.size();
      vec vbeta(beta); // nu=log(variance) is the last parameter in beta; exp(nu)=sigma^2
      vbeta.resize(n-1);
      // case: alpha for joint model?
      double sigma = exp(0.5*beta[n-1]); // standard deviation
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      double constraint = 0.0;
      double ll = 0.0;
      bool left_trunc_not_recurrent = (this->delayed && !this->recurrent && !this->interval);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        vec eta = this->X * vbeta;
        vec etaD = this->XD * vbeta;
        vec eta0(1,fill::zeros);
        vec eta1(this->X.n_rows,fill::zeros);
        if (this->delayed) {
          eta0 = this->X0 * vbeta;
          eta1 = this->X1 * vbeta;
        }
        double Lj = 0.0, L0j = 0.0;
        for (int k=0; k<K; ++k) {
          li_constraint lik, H0ik;
          double bi = sqrt(2.0)*sigma*this->gauss_x(k);
          if (left_trunc_not_recurrent) {
            this->delayed = false;
            lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
            this->delayed = true;
            H0ik = Base::li_left_truncated(eta0+Z0*bi);
            L0j += exp(-sum(H0ik.li))*wstar(k);
            constraint += H0ik.constraint;
          } else {
            lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
          }
          Lj += exp(sum(lik.li))*wstar(k);
          constraint += lik.constraint;
        }
        ll += log(Lj);
        if (left_trunc_not_recurrent) ll -= log(L0j);
      }
      resetDesign();
      return -(ll - constraint);
    }
    double objective(vec beta) {
      return this->adaptive ? objective_adaptive(beta) : objective_nonadaptive(beta);
    }
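    // Adaptive quadrature (sketch): instead of centring the nodes at zero,
    // objective_adaptive() recentres them at the cluster-specific posterior
    // mode muhat and rescales by tauhat (from the curvature of the cluster
    // log-density), i.e. b = mu + sqrt(2)*tau*x_k with weight correction
    //   sqrt(2)*tau*w_k*exp(x_k^2)*g(b),
    // where g includes the normal density of b; this typically needs far
    // fewer nodes than the non-adaptive rule.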
    double objective_adaptive(vec beta) {
      int n = beta.size();
      this->objective_cluster_beta = beta; // for use by objective_cluster()
      vec vbeta(beta); // nu=log(variance) is the last parameter in beta; exp(nu)=sigma^2
      double sigma = exp(0.5*vbeta(this->n-1)); // standard deviation
      vbeta.resize(n-1);
      int K = gauss_x.size(); // number of nodes
      double constraint = 0.0;
      double ll = 0.0;
      bool left_trunc_not_recurrent = (this->delayed && !this->recurrent && !this->interval);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        // cluster-specific mode
        double mu;
        if (!first) {
          // double Tol = this->reltol;
          // int Maxit = 100;
          // mu = R_zeroin2(-100.0,
          //                100.0,
          //                gradient_cluster(-100.0),
          //                gradient_cluster(100.0),
          //                &(call_gradient_cluster<This>),
          //                (void *) this,
          //                &Tol,
          //                &Maxit);
          mu = Brent_fmin(muhat[it->first]-1e-1,
                          muhat[it->first]+1e-1,
                          &(call_objective_cluster<This>),
                          (void *) this, this->reltol);
        }
        if (first || std::abs(mu-muhat[it->first])>(1e-1-1e-5))
          // if (first || mu>100.0-1e-5 || mu<-100.0+1e-5)
          mu = Brent_fmin(-100.0,100.0,&(call_objective_cluster<This>),
                          (void *) this, this->reltol);
        muhat[it->first] = mu;
        // cluster-specific variance
        double eps = 1e-6;
        double tau = sqrt(eps*2.0/(gradient_cluster(mu+eps)-gradient_cluster(mu-eps)));
        tauhat[it->first] = tau;
        vec eta = this->X * vbeta;
        vec etaD = this->XD * vbeta;
        vec eta0(1,fill::zeros);
        vec eta1(this->X.n_rows,fill::zeros);
        if (this->delayed) {
          eta0 = this->X0 * vbeta;
          eta1 = this->X1 * vbeta;
        }
        double Lj = 0.0, L0j = 0.0;
        for (int k=0; k<K; ++k) {
          double bi = mu + sqrt(2.0)*tau*this->gauss_x(k);
          li_constraint lik, l0ik;
          double g = 0.0, g0 = 0.0;
          if (left_trunc_not_recurrent) {
            // first: reset such that delayed is false and calculate the likelihood for right censoring
            this->delayed = false;
            lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
            // second: reset such that delayed is true and calculate the likelihood for left truncation
            this->delayed = true;
            l0ik = Base::li_left_truncated(eta0+Z0*bi);
            constraint += l0ik.constraint;
            g0 = exp(sum(l0ik.li)+R::dnorm(bi,0.0,sigma,1));
            L0j += sqrt(2.0)*tau*g0*gauss_w(k)*exp(gauss_x(k)*gauss_x(k));
          } else {
            lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
          }
          g = exp(sum(lik.li)+R::dnorm(bi,0.0,sigma,1));
          Lj += sqrt(2.0)*tau*g*gauss_w(k)*exp(gauss_x(k)*gauss_x(k));
          constraint += lik.constraint;
        }
        ll += log(Lj);
        if (left_trunc_not_recurrent && L0j>0.0) ll -= log(L0j);
      }
      resetDesign();
      first = false;
      return -(ll - constraint);
    }
    vec getli(vec beta) {
      vec missing(1,fill::zeros);
      return missing;
    }
    mat getgradli(vec beta) {
      mat missing(1,1,fill::zeros);
      return missing;
    }
    // TODO: redefine li
    void resetDesign() {
      this->X = full.X;
      this->XD = full.XD;
      this->bhazard = full.bhazard;
      this->wt = full.wt;
      this->event = full.event;
      this->time = full.time;
      this->offset = full.offset;
      this->X1 = full.X1;
      this->X0 = full.X0;
      this->wt0 = full.wt0;
      this->which0 = full.which0;
      this->Z = full_Z;
      this->Z0 = full_Z0;
    }
    void clusterDesign(IndexMap::iterator it) {
      clusterid = it->first;
      uvec index = conv_to<uvec>::from(it->second);
      this->X = full.X.rows(index);
      this->XD = full.XD.rows(index);
      this->X1 = full.X1.rows(index);
      this->bhazard = full.bhazard(index);
      this->wt = full.wt(index);
      this->event = full.event(index);
      this->time = full.time(index);
      this->offset = full.offset(index);
      this->Z = full_Z(index);
      this->Z0 = vec(1,fill::zeros);
      if (this->delayed) {
        uvec index0 = Base::map0f(index);
        this->X0 = full.X0.rows(index0);
        this->wt0 = full.wt0(index0);
        this->Z0 = full_Z0(index0);
        this->which0 = Base::which0f(index);
      }
    }
    // log(g(bi))
    double objective_cluster(double bi) {
      vec vbeta = this->objective_cluster_beta; // nu=log(variance) is the last parameter in vbeta
      double sigma = exp(0.5*vbeta(this->n-1)); // standard deviation
      vbeta.resize(this->n - 1);
      vec eta = this->X * vbeta;
      vec etaD = this->XD * vbeta;
      vec eta0(1,fill::zeros);
      vec eta1(this->X.n_rows,fill::zeros);
      if (this->delayed) {
        eta0 = this->X0 * vbeta;
        eta1 = this->X1 * vbeta;
      }
      li_constraint lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,vbeta);
      double ll = sum(lik.li) + R::dnorm(bi,0.0,sigma,1);
      return -ll;
    }
    double gradient_cluster(double bi) {
      vec vbeta = this->objective_cluster_beta; // nu=log(variance) is the last parameter in vbeta
      double sigma = exp(0.5*vbeta(this->n-1)); // standard deviation
      vbeta.resize(this->n - 1);
      vec eta = this->X * vbeta;
      vec etaD = this->XD * vbeta;
      vec eta0(1,fill::zeros);
      vec eta1(this->X1.n_rows,fill::zeros);
      if (this->delayed) {
        eta0 = this->X0 * vbeta;
        eta1 = this->X1 * vbeta;
      }
      mat X = mat(Z); // mat(this->X.n_rows,1,fill::ones);
      mat XD = mat(this->XD.n_rows,1,fill::zeros);
      mat X0 = mat(Z0); // mat(this->X0.n_rows,1,fill::ones);
      mat X1 = mat(Z); // mat(this->X1.n_rows,1,fill::ones);
      gradli_constraint gradlik = Base::gradli(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,
                                               X,XD,X0,X1,vbeta);
      rowvec gradll = sum(gradlik.gradli,0) - bi/sigma/sigma;
      return -gradll(0);
    }
    // double fdhessian(boost::function<double(double)> f, double x, double eps = 1.0e-6) {
    //   return (-f(x+2*eps)+16*f(x+eps)-30*f(x)+16*f(x-eps)-f(x-2*eps))/12.0/eps/eps;
    // }
    void calculate_modes_and_variances() {
      this->objective_cluster_beta = this->bfgs.coef;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        modes[it->first] = Brent_fmin(-100.0,100.0,&(call_objective_cluster<This>),
                                      (void *) this, this->reltol);
        // Abramowitz and Stegun 1972, p. 884
        double bi = modes[it->first];
        double eps = 1e-6;
        double f1 = objective_cluster(bi+2*eps);
        double f2 = objective_cluster(bi+eps);
        double f3 = objective_cluster(bi);
        double f4 = objective_cluster(bi-eps);
        double f5 = objective_cluster(bi-2*eps);
        double hessian = (-f1+16*f2-30*f3+16*f4-f5)/12.0/eps/eps;
        variances[it->first] = 1.0/hessian;
        variances[it->first] = eps*2.0/(gradient_cluster(bi+eps)-gradient_cluster(bi-eps));
      }
      resetDesign();
    }
    vec gradient(vec beta) {
      return this->adaptive ? gradient_adaptive(beta) : gradient_nonadaptive(beta);
    }
    vec gradient_adaptive(vec beta) {
      int n = beta.size();
      this->objective_cluster_beta = beta; // for use by objective_cluster()
      vec betastar = beta; // we use the last element as bi - is this a good idea?
      vec vbeta(beta); // nu=log(variance) is the last parameter in beta; exp(nu)=sigma^2
      double sigma = exp(0.5*vbeta(this->n-1)); // standard deviation
      vbeta.resize(n-1);
      int K = gauss_x.size(); // number of nodes
      // rowvec dconstraint(n,fill::zeros);
      rowvec gradLi(n,fill::zeros);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        // cluster-specific modes and variances (calculated in the objective function)
        double mu = muhat[it->first];
        double tau = tauhat[it->first];
        mat Zmain(Z);
        mat ZD(this->XD.n_rows,1,fill::zeros);
        mat Z0main(Z0);
        mat Z1(Z);
        mat Xstar = join_horiz(this->X, Zmain);
        mat XDstar = join_horiz(this->XD, ZD); // assumes time-invariant random effects
        mat X0star = join_horiz(this->X0, Z0main);
        mat X1star = join_horiz(this->X1, Z1);
        double Lj = 0.0;
        rowvec numerator(this->n,fill::zeros);
        for (int k=0; k<K; ++k) {
          double bi = mu + sqrt(2.0)*tau*this->gauss_x(k);
          betastar[betastar.size()-1] = bi;
          vec etastar = Xstar * betastar;
          vec etaDstar = XDstar * betastar;
          vec eta0star = X0star * betastar;
          vec eta1star = X1star * betastar;
          li_constraint lik = Base::li(etastar,etaDstar,eta0star,eta1star,beta);
          double g = exp(sum(lik.li) + R::dnorm(bi,0.0,sigma,1));
          gradli_constraint gradlik = Base::gradli(etastar, etaDstar, eta0star, eta1star,
                                                   Xstar, XDstar, X0star, X1star, beta);
          Lj += sqrt(2.0)*tau*g*gauss_w(k)*exp(gauss_x(k)*gauss_x(k));
          rowvec numeratorstar = sqrt(2.0)*tau*g*sum(gradlik.gradli,0)*
            gauss_w(k)*exp(gauss_x(k)*gauss_x(k));
          numerator(span(0,n-2)) += numeratorstar(span(0,n-2));
          numerator(n-1) += sqrt(2.0)*tau*gauss_w(k)*exp(gauss_x(k)*gauss_x(k))*g*
            (-0.5 + bi*bi/2/sigma/sigma);
          // dconstraint += sum(gradlik.constraint,0);
        }
        gradLi += numerator/Lj;
      }
      vec gr(n,fill::zeros);
      for (int i=0; i<n; ++i) gr(i) = -gradLi(i); // (tail reconstructed)
      resetDesign();
      return gr;
    }
    vec gradient_nonadaptive(vec beta) {
      int n = beta.size();
      vec vbeta(beta);
      double sigma = exp(0.5*vbeta(this->n-1)); // (head reconstructed)
      vbeta.resize(n-1);
      mat Zmain(Z);
      mat ZD(this->N,1,fill::zeros);
      mat Z0main(Z0);
      mat Z1(Z);
      mat Xstar = join_horiz(this->X, Zmain);
      mat XDstar = join_horiz(this->XD, ZD); // assumes time-invariant random effects
      mat X0star = join_horiz(this->X0, Z0main);
      mat X1star = join_horiz(this->X1, Z1);
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      int G = clusters.size();
      vec Li(G,fill::zeros);
      mat gradLi(G,n,fill::zeros);
      vec betastar = beta;
      rowvec dconstraint(n,fill::zeros);
      mat likeli_jk(this->N,K,fill::zeros);
      vec likeli_k(K,fill::zeros);
      cube grad_jk(this->N,n,K,fill::zeros);
      mat grad_kn(K,n,fill::zeros);
      // for K nodes calculation (loop reconstructed)
      for (int k=0; k<K; ++k) {
        double bi = sqrt(2.0)*sigma*gauss_x(k);
        betastar[betastar.size()-1] = bi;
        vec etastar = Xstar * betastar;
        vec etaDstar = XDstar * betastar;
        vec eta0star = X0star * betastar;
        vec eta1star = X1star * betastar;
        li_constraint lik = Base::li(etastar,etaDstar,eta0star,eta1star,beta);
        likeli_jk.col(k) = lik.li;
        gradli_constraint gradlik = Base::gradli(etastar,etaDstar,eta0star,eta1star,
                                                 Xstar,XDstar,X0star,X1star,beta);
        grad_jk.slice(k) = gradlik.gradli;
        dconstraint += sum(gradlik.constraint,0);
      }
      int i=0;
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it, ++i) {
        uvec index = conv_to<uvec>::from(it->second);
        vec likeli_k = exp(sum(likeli_jk.rows(index),0)).t(); // sum index components at all nodes for cluster i
        for(int k=0; k<K; ++k)
          grad_kn.row(k) = sum(grad_jk.slice(k).rows(index),0); // (reconstructed)
        Li(i) = dot(wstar, likeli_k);
        gradLi.row(i) = (wstar % likeli_k).t() * grad_kn;
      }
      // if (this->bfgs.trace>0) {
      //   Rprintf("fdgradient="); Rprint(this->fdgradient(beta));
      // }
      vec gr(n,fill::zeros);
      for (int j=0; j<n; ++j)
        gr(j) = -sum(gradLi.col(j) / Li);
      /* ... the chain-rule term for nu = log(variance) was not recoverable
         and is elided ... */
      return gr;
    }
    // (reconstructed to mirror the other frailty classes)
    bool feasible(vec beta) {
      vec coef(beta);
      coef.resize(this->n-1);
      return Base::feasible(coef);
    }
    SEXP return_modes() { // (reconstructed)
      calculate_modes_and_variances();
      return wrap(modes);
    }
    SEXP return_variances() { // (reconstructed)
      calculate_modes_and_variances();
      return wrap(variances);
    }
    OPTIM_FUNCTIONS;
    IndexMap clusters;
    vec gauss_x, gauss_w;
    vec full_Z, full_Z0, Z, Z0;
    BaseData full;
    Doubles modes, variances, muhat, tauhat;
    vec objective_cluster_beta;
    int clusterid;
    bool adaptive, first, recurrent;
  };

  template<class T>
  double call_objective_clusterND(int n, double * beta, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    vec bi(beta, n);
    return model->objective_cluster(bi);
  }
  template<class T>
  void call_gradient_clusterND(int n, double * beta, double * out, void * model_ptr) {
    T * model = static_cast<T*>(model_ptr);
    vec bi(beta, n);
    vec grad = model->gradient_cluster(bi);
    for (int i=0; i<n; ++i) out[i] = grad[i];
  }
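  // Bivariate random effects (sketch): NormalSharedFrailty2D parameterises
  //   Sigma = [ s11,                rho*sqrt(s11*s22) ;
  //             rho*sqrt(s11*s22),  s22               ]
  // with s11 = exp(beta[n-3]), rho = 2/(1+exp(-beta[n-2])) - 1 and
  // s22 = exp(beta[n-1]) (see corrtransform() and calc_SqrtSigma() below),
  // so the three appended parameters are unconstrained on the real line.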
  /** Extension of stpm2 and pstpm2 to include bivariate normal random effects
      with a cluster variable **/
  template<class Base>
  class NormalSharedFrailty2D : public Base {
  public:
    typedef std::vector<int> Index;
    typedef std::map<int,Index> IndexMap;
    typedef NormalSharedFrailty2D<Base> This;
    typedef std::map<int,vec> MapVec;
    typedef std::map<int,mat> MapMat;
    typedef std::map<int,cube> MapCube;
    NormalSharedFrailty2D(SEXP sexp) : Base(sexp) {
      List list = as<List>(sexp);
      const BaseData fullTmp = {this->X, this->XD, this->X0, this->X1,
                                this->bhazard, this->wt, this->wt0, this->event,
                                this->time, this->offset, this->map0, this->ind0,
                                this->which0};
      full = fullTmp;
      IntegerVector cluster = as<IntegerVector>(list["cluster"]);
      full_Z = Z = as<mat>(list["Z"]);
      full_Z0 = Z0 = Z.rows(this->ind0); // this could be size 0...
      gauss_x = as<vec>(list["gauss_x"]); // node values
      gauss_w = as<vec>(list["gauss_w"]); // probability weights
      adaptive = as<bool>(list["adaptive"]); // current = false
      recurrent = as<bool>(list["recurrent"]);
      first = true;
      // ragged array indexed by a map of vectors
      for (int id=0; id<cluster.size(); ++id)
        clusters[cluster[id]].push_back(id);
      redim = 2;  // two random effects per cluster (reconstructed)
      reparm = 3; // two log-variances plus a transformed correlation (reconstructed)
      this->nbeta = this->n - reparm;
    }
    /** Objective function for normal random effects **/
    double corrtransform(double x) { return 2.0/(1.0+exp(-x))-1.0; }
    double objective_nonadaptive(vec beta) {
      int n = beta.size();
      vec vbeta(beta);
      calc_SqrtSigma(beta);
      vbeta.resize(n-reparm);
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      vec d = sqrt(2.0)*gauss_x;
      double constraint = 0.0;
      double ll = 0.0;
      bool left_trunc_not_recurrent = (this->delayed && !this->recurrent && !this->interval);
      vec dk(redim), bi(redim);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        vec eta = this->X * vbeta;
        vec etaD = this->XD * vbeta;
        vec eta0(1,fill::zeros);
        vec eta1(this->X.n_rows,fill::zeros);
        if (this->delayed) {
          eta0 = this->X0 * vbeta;
          eta1 = this->X1 * vbeta;
        }
        double Lj = 0.0, L0j = 0.0;
        for (int k=0; k<K; ++k) {
          for (int kk=0; kk<K; ++kk) {
            dk(0) = d(k); dk(1) = d(kk); // (node assembly reconstructed)
            bi = SqrtSigma * dk;
            li_constraint lik, H0ik;
            if (left_trunc_not_recurrent) {
              this->delayed = false;
              lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
              this->delayed = true;
              H0ik = Base::li_left_truncated(eta0+Z0*bi);
              L0j += exp(-sum(H0ik.li))*wstar(k)*wstar(kk);
              constraint += H0ik.constraint;
            } else {
              lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
            }
            Lj += exp(sum(lik.li))*wstar(k)*wstar(kk);
            constraint += lik.constraint;
          }
        }
        ll += log(Lj);
        if (left_trunc_not_recurrent) ll -= log(L0j);
      }
      resetDesign();
      return -(ll - constraint);
    }
    double objective_cluster(vec bi) {
      vec vbeta = this->objective_cluster_beta;
      vbeta.resize(this->n - reparm);
      vec eta = this->X * vbeta;
      vec etaD = this->XD * vbeta;
      vec eta0(this->Z0.n_rows,fill::zeros);
      vec eta1(this->X.n_rows,fill::zeros);
      if (this->delayed) {
        eta0 = this->X0 * vbeta;
        eta1 = this->X1 * vbeta;
      }
      li_constraint lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,vbeta);
      vec zero(redim,fill::zeros);
      double ll = sum(lik.li) + dmvnrm_arma(bi,zero,this->Sigma,true);
      return -ll;
    }
    vec gradient_cluster(vec bi) {
      vec vbeta = this->objective_cluster_beta;
      vbeta.resize(this->n - reparm);
      vec eta = this->X * vbeta;
      vec etaD = this->XD * vbeta;
      vec eta0(1,fill::zeros);
      vec eta1(this->X1.n_rows,fill::zeros);
      if (this->delayed) {
        eta0 = this->X0 * vbeta;
        eta1 = this->X1 * vbeta;
      }
      mat X = Z;
      mat XD = mat(this->XD.n_rows,redim,fill::zeros);
      mat X0 = Z0;
      mat X1 = Z;
      gradli_constraint gradlik = Base::gradli(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,
                                               X,XD,X0,X1,vbeta);
      vec gradll = (sum(gradlik.gradli,0)).t() - invSigma*bi;
      return -gradll;
    }
    mat calc_SqrtSigma(vec beta, bool calc_inverse = true) {
      bool flag;
      int n = beta.size();
      Sigma.resize(redim,redim);
      Sigma(0,0) = exp(beta[n-reparm]); // variance
      double corr = corrtransform(beta[n-2]); // correlation
      Sigma(1,1) = exp(beta[n-1]); // variance
      Sigma(0,1) = Sigma(1,0) = corr*sqrt(Sigma(1,1)*Sigma(0,0)); // covariance
      if (calc_inverse)
        flag = inv(invSigma, Sigma);
      flag = chol(SqrtSigma,Sigma); // which version of RcppArmadillo??
      if (!flag) {
        Rprintf("Sigma:\n");
        Rprint(Sigma);
        Rcpp::stop("Matrix sqrt invalid");
      }
      return SqrtSigma;
    }
    // gradient in SqrtSigma wrt beta
    cube gradSqrtSigma(vec beta, double eps = 1.0e-6) {
      cube out(redim,redim,reparm,fill::zeros);
      int offseti = beta.size()-reparm;
      vec betax;
      mat val;
      // central finite differences (loop reconstructed)
      for (int i=0; i<reparm; ++i) {
        betax = beta;
        betax(offseti+i) += eps;
        val = calc_SqrtSigma(betax, false);
        betax(offseti+i) -= 2.0*eps;
        out.slice(i) = (val - calc_SqrtSigma(betax, false))/2.0/eps;
      }
      return out;
    }
    // SqrtSigma (Cholesky factor of the inverse curvature) at the cluster-specific modes
    mat calc_SqrtSigma_adaptive(BFGS opt) {
      arma::mat tau, sqrttau;
      bool flag = inv(tau, as<mat>(opt.calc_hessian(call_gradient_clusterND<This>, (void *) this)));
      flag = chol(sqrttau,tau); // which version of RcppArmadillo??
      if (!flag) {
        Rprintf("tau:\n");
        Rprint(tau);
        Rcpp::stop("Matrix sqrt invalid");
      }
      return sqrttau;
    }
    // gradient for SqrtSigma for the cluster-specific modes
    cube gradSqrtSigma_adaptive(BFGS opt, double eps = 1.0e-6) {
      cube out(redim,redim,reparm,fill::zeros);
      vec betax;
      vec old = this->objective_cluster_beta;
      mat val;
      for (int i=0; i<reparm; ++i) {
        betax = old;
        betax(i) += eps;
        this->objective_cluster_beta = betax;
        val = calc_SqrtSigma_adaptive(opt);
        betax(i) -= 2.0*eps;
        this->objective_cluster_beta = betax;
        out.slice(i) = (val - calc_SqrtSigma_adaptive(opt))/2.0/eps;
      }
      this->objective_cluster_beta = old;
      return out;
    }
    double objective_adaptive(vec beta) {
      int n = beta.size();
      this->objective_cluster_beta = beta; // for use by objective_cluster()
      vec vbeta(beta);
      calc_SqrtSigma(beta);
      vbeta.resize(n-reparm);
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      vec d = sqrt(2.0)*gauss_x;
      double constraint = 0.0;
      double ll = 0.0;
      bool left_trunc_not_recurrent = (this->delayed && !this->recurrent && !this->interval);
      vec dk(redim), bi(redim);
      vec zero(redim,fill::zeros);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        if (muhat.count(it->first)==0) { // initialise
          muhat[it->first].set_size(redim);
          for (int i=0; i<redim; ++i)
            muhat[it->first](i) = 0.0;
        }
        // cluster-specific modes
        BFGS opt;
        opt.optim(&(call_objective_clusterND<This>),
                  &(call_gradient_clusterND<This>),
                  as<NumericVector>(wrap(muhat[it->first])),
                  (void *) this);
        muhat[it->first] = as<vec>(wrap(opt.coef));
        // variance for the cluster-specific modes
        arma::mat tau, sqrttau;
        bool flag = inv(tau, as<mat>(opt.calc_hessian(call_gradient_clusterND<This>, (void *) this)));
        double dettau = det(tau); // logdet would be more precise
        dettauhat[it->first] = dettau;
        flag = chol(sqrttau,tau); // which version of RcppArmadillo?
        if (!flag) {
          Rprintf("tau:\n");
          Rprint(tau);
          Rcpp::stop("Matrix sqrt invalid.");
        }
        sqrttauhat[it->first] = sqrttau;
        if (this->optimiser=="BFGS")
          gradsqrttauhat[it->first] = gradSqrtSigma_adaptive(opt);
        vec eta = this->X * vbeta;
        vec etaD = this->XD * vbeta;
        vec eta0(1,fill::zeros);
        vec eta1(this->X.n_rows,fill::zeros);
        if (this->delayed) {
          eta0 = this->X0 * vbeta;
          eta1 = this->X1 * vbeta;
        }
        double Lj = 0.0, L0j = 0.0;
        for (int k=0; k<K; ++k) {
          for (int kk=0; kk<K; ++kk) {
            dk(0) = d(k); dk(1) = d(kk); // (node assembly reconstructed)
            bi = muhat[it->first] + sqrttau * dk;
            double g = 0.0, g0 = 0.0;
            li_constraint lik, l0ik;
            if (left_trunc_not_recurrent) {
              this->delayed = false;
              lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
              this->delayed = true;
              l0ik = Base::li_left_truncated(eta0+Z0*bi);
              g0 = exp(sum(l0ik.li)+dmvnrm_arma(bi,zero,Sigma,true));
              // node-specific rescaling exp(|dk|^2/2) = exp(x_k^2 + x_kk^2)
              L0j += wstar(k)*wstar(kk)*g0*exp(dot(dk,dk)/2);
            } else {
              lik = Base::li(eta+Z*bi,etaD,eta0+Z0*bi,eta1+Z*bi,beta);
            }
            g = exp(sum(lik.li)+dmvnrm_arma(bi,zero,Sigma,true));
            Lj += wstar(k)*wstar(kk)*g*exp(dot(dk,dk)/2);
            constraint += lik.constraint;
          }
        }
        ll += log(2.0*datum::pi)*redim/2.0 + log(dettau)/2.0 + log(Lj);
        if (left_trunc_not_recurrent) ll -= log(L0j);
      }
      resetDesign();
      first = false;
      return -(ll - constraint);
    }
    // (reconstructed to mirror NormalSharedFrailty)
    double objective(vec beta) {
      return this->adaptive ? objective_adaptive(beta) : objective_nonadaptive(beta);
    }
    vec gradient(vec beta) {
      return this->adaptive ? gradient_adaptive(beta) : gradient_nonadaptive(beta);
    }
    vec gradient_nonadaptive(vec beta) {
      int n = beta.size();
      vec vbeta(beta);
      vbeta.resize(n-reparm);
      calc_SqrtSigma(beta);
      cube gradSqrt = gradSqrtSigma(beta);
      if (this->bfgs.trace>1) Rprint(gradSqrt);
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      vec d = sqrt(2.0)*gauss_x;
      vec gradl(n,fill::zeros);
      vec dk(redim), bi(redim);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        uvec index = conv_to<uvec>::from(it->second);
        mat Zmain(Z);
        mat ZD(this->XD.n_rows,redim,fill::zeros);
        mat Z0main(Z0);
        mat Z1(Z);
        mat Xstar = join_horiz(this->X, Zmain);
        mat XDstar = join_horiz(this->XD, ZD); // assumes time-invariant random effects
        mat X0star = join_horiz(this->X0, Z0main);
        mat X1star = join_horiz(this->X1, Z1);
        double L = 0.0;
        vec gradL(n,fill::zeros);
        vec betastar = beta;
        betastar.resize(beta.size() - reparm + redim);
        // for K nodes calculation
        for (int k=0; k<K; ++k) {
          for (int kk=0; kk<K; ++kk) {
            /* ... per-node contributions to L and gradL: set the trailing
               redim elements of betastar to bi = SqrtSigma*dk, evaluate
               Base::li and Base::gradli on the starred design, and map the
               random-effect gradient back to the covariance parameters via
               gradSqrt; elided ... */
          }
        }
        gradl += gradL/L; // (tail reconstructed)
      }
      resetDesign();
      return -gradl;
    }
    vec gradient_adaptive(vec beta) {
      int n = beta.size();
      this->objective_cluster_beta = beta; // for use by objective_cluster()
      vec vbeta(beta);
      vbeta.resize(n-reparm);
      calc_SqrtSigma(beta); // unnecessary?
      int K = gauss_x.size(); // number of nodes
      vec wstar = gauss_w/sqrt(datum::pi);
      vec d = sqrt(2.0)*gauss_x;
      vec gradl(n,fill::zeros);
      vec dk(redim), bi(redim);
      vec zero(redim,fill::zeros);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        uvec index = conv_to<uvec>::from(it->second);
        vec mu = muhat[it->first];
        mat sqrttau = sqrttauhat[it->first];
        cube gradsqrttau = gradsqrttauhat[it->first];
        mat Zmain(Z);
        mat ZD(this->XD.n_rows,redim,fill::zeros);
        mat Z0main(Z0);
        mat Z1(Z);
        mat Xstar = join_horiz(this->X, Zmain);
        mat XDstar = join_horiz(this->XD, ZD); // assumes time-invariant random effects
        mat X0star = join_horiz(this->X0, Z0main);
        mat X1star = join_horiz(this->X1, Z1);
        double L = 0.0;
        vec gradL(n,fill::zeros);
        vec betastar = beta;
        betastar.resize(beta.size() - reparm + redim);
        // for K nodes calculation
        for (int k=0; k<K; ++k) {
          for (int kk=0; kk<K; ++kk) {
            /* ... per-node contributions to L and gradL at
               bi = mu + sqrttau*dk, including the mode/curvature chain rule
               through gradsqrttau; elided ... */
          }
        }
        gradl += gradL/L; // (tail reconstructed)
      }
      resetDesign();
      return -gradl;
    }
    void resetDesign() {
      this->X = full.X;
      this->XD = full.XD;
      this->bhazard = full.bhazard;
      this->wt = full.wt;
      this->event = full.event;
      this->time = full.time;
      this->offset = full.offset;
      this->X1 = full.X1;
      this->X0 = full.X0;
      this->wt0 = full.wt0;
      this->which0 = full.which0;
      this->Z = full_Z;
      this->Z0 = full_Z0;
    }
    void clusterDesign(IndexMap::iterator it) {
      clusterid = it->first;
      uvec index = conv_to<uvec>::from(it->second);
      this->X = full.X.rows(index);
      this->XD = full.XD.rows(index);
      this->X1 = full.X1.rows(index);
      this->bhazard = full.bhazard(index);
      this->wt = full.wt(index);
      this->event = full.event(index);
      this->offset = full.offset(index);
      this->time = full.time(index);
      this->Z = full_Z.rows(index);
      this->Z0 = mat(1,redim,fill::zeros);
      if (this->delayed) {
        uvec index0 = Base::map0f(index);
        this->X0 = full.X0.rows(index0);
        this->wt0 = full.wt0(index0);
        this->Z0 = full_Z0.rows(index0);
        this->which0 = Base::which0f(index);
      }
    }
    bool feasible(vec beta) {
      vec coef(beta);
      coef.resize(beta.size()-reparm);
      return Base::feasible(coef);
    }
    void calculate_modes_and_variances() {
      vec beta = as<vec>(this->bfgs.coef);
      this->objective_cluster_beta = beta;
      calc_SqrtSigma(beta);
      for (IndexMap::iterator it=clusters.begin(); it!=clusters.end(); ++it) {
        clusterDesign(it);
        if (muhat.count(it->first)==0) { // initialise
          muhat[it->first].set_size(redim);
          for (int i=0; i<redim; ++i)
            muhat[it->first](i) = 0.0;
        }
        // cluster-specific modes
        BFGS opt;
        opt.optim(&(call_objective_clusterND<This>),
                  &(call_gradient_clusterND<This>),
                  as<NumericVector>(wrap(muhat[it->first])),
                  (void *) this);
        muhat[it->first] = as<vec>(wrap(opt.coef));
        // cluster-specific variance
        arma::mat tau, sqrttau;
        bool flag = inv(tau, as<mat>(opt.calc_hessian(call_gradient_clusterND<This>, (void *) this)));
        double dettau = det(tau);
        dettauhat[it->first] = dettau;
        flag = chol(sqrttau,tau); // which version of RcppArmadillo?
        if (!flag) {
          Rprintf("tau:\n");
          Rprint(tau);
          Rcpp::stop("Matrix sqrt invalid.");
        }
        sqrttauhat[it->first] = sqrttau;
        gradsqrttauhat[it->first] = gradSqrtSigma_adaptive(opt);
      }
      resetDesign();
    }
    SEXP return_modes() {
      calculate_modes_and_variances();
      return wrap(muhat);
    }
    SEXP return_variances() {
      calculate_modes_and_variances();
      return wrap(sqrttauhat); // actually, these are not variances!
    }
    OPTIM_FUNCTIONS;
    IndexMap clusters;
    vec gauss_x, gauss_w;
    mat full_Z, full_Z0, Z, Z0;
    BaseData full;
    MapVec modes;
    MapMat variances;
    vec objective_cluster_beta;
    MapVec muhat;
    MapMat sqrttauhat;
    MapCube gradsqrttauhat;
    std::map<int,double> dettauhat;
    int clusterid, redim, reparm;
    bool adaptive, first, recurrent;
    mat Sigma, SqrtSigma, invSigma;
  };

  template<class T>
  SEXP stpm2_model_output_(SEXP args) {
    T model(args);
    List list = as<List>(args);
    std::string return_type = as<std::string>(list["return_type"]);
    vec beta(&model.init[0],model.n);
    if (return_type == "optim") {
      model.pre_process();
      model.optimWithConstraint(model.init);
      // model.bfgs.hessian = model.bfgs.calc_hessian(&optimgradient<T>, (void *) &model);
      // model.bfgs.hessian = model.calcHessian(model.bfgs.coef);
      model.post_process();
      return List::create(_("fail")=model.bfgs.fail,
                          _("coef")=wrap(model.bfgs.coef),
                          _("hessian")=wrap(model.bfgs.hessian),
                          _("kappa")=wrap(model.kappa));
    }
    else if (return_type == "objective") return wrap(model.objective(beta));
    else if (return_type == "gradient") {
      model.objective(beta); // only needed for updating NormalSharedFrailty
      return wrap(model.gradient(beta));
    }
    else if (return_type == "feasible") return wrap(model.feasible(beta));
    else if (return_type == "modes") return model.return_modes();
    else if (return_type == "variances") return model.return_variances();
    else if (return_type == "li") return wrap(model.getli(beta));
    else if (return_type == "gradli") return wrap(model.getgradli(beta));
    else {
      REprintf("Unknown return_type.\n");
      return wrap(-1);
    }
  }
  template<class T>
  SEXP pstpm2_model_output_(SEXP args) {
    T model(args);
    List list = as<List>(args);
    std::string return_type = as<std::string>(list["return_type"]);
    vec beta(&model.init[0],model.n);
    if (return_type == "optim_fixed") return(model.optim_fixed());
    else if (return_type == "optim_first") return(model.optim_first());
    else if (return_type == "optim_multivariate") return(model.optim_multivariate());
    else if (return_type == "objective") return wrap(model.objective(beta));
    else if (return_type == "objective0") return wrap(model.objective0(beta));
    else if (return_type == "gradient") {
      model.objective(beta);
      return wrap(model.gradient(beta));
    }
    else if (return_type == "gradient0") return wrap(model.gradient0(beta));
    else if (return_type == "constraint") return wrap(model.feasible(beta));
    else if (return_type == "feasible") return wrap(model.feasible(beta));
    else if (return_type == "li") return wrap(model.getli(beta));
    else if (return_type == "gradli") return wrap(model.getgradli(beta));
    else if (return_type == "modes") return model.return_modes();
    else if (return_type == "variances") return model.return_variances();
    else {
      REprintf("Unknown return_type.\n");
      return wrap(-1);
    }
  }
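  // R-side dispatch (sketch): model_output() below is the single entry point,
  // called from the package's R code along the lines of
  //   .Call("model_output", list(type="stpm2", return_type="optim", ...))
  // where `type` picks the model class instantiated here and `return_type`
  // picks the action (optim/objective/gradient/li/...). The exact argument
  // list is assembled by the R wrappers and is not shown in this file.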
  class prof_class {
  public:
    prof_class(const std::string &name){
#ifdef DO_PROF
      std::stringstream ss;
      auto t = std::time(nullptr);
      auto tm = *std::localtime(&t);
      ss << "profile-" << name
         << std::put_time(&tm, "-%d-%m-%Y-%H-%M-%S.log");
      Rcpp::Rcout << "Saving profile output to '" << ss.str() << "'" << std::endl;
      const std::string s = ss.str();
      ProfilerStart(s.c_str());
#endif
    }
#ifdef DO_PROF
    ~prof_class(){
      ProfilerStop();
    }
#endif
  };
  RcppExport SEXP model_output(SEXP args) {
    prof_class prof("model_output");
    List list = as<List>(args);
    std::string type = as<std::string>(list["type"]);
    if (type=="stpm2") {
      return stpm2_model_output_<Stpm2>(args);
    }
    else if (type=="pstpm2") {
      return pstpm2_model_output_<Pstpm2<Stpm2,SmoothLogH> >(args);
    }
    else if (type=="stpm2_gamma_frailty") {
      return stpm2_model_output_<GammaSharedFrailty<Stpm2> >(args);
    }
    else if (type=="pstpm2_gamma_frailty") {
      return pstpm2_model_output_<Pstpm2<GammaSharedFrailty<Stpm2>,SmoothLogH> >(args);
    }
    else if (type=="stpm2_normal_frailty") {
      return stpm2_model_output_<NormalSharedFrailty<Stpm2> >(args);
    }
    else if (type=="stpm2_normal_frailty_2d") {
      return stpm2_model_output_<NormalSharedFrailty2D<Stpm2> >(args);
    }
    else if (type=="pstpm2_normal_frailty") { // order is important here
      return pstpm2_model_output_<Pstpm2<NormalSharedFrailty<Stpm2>,SmoothLogH> >(args);
    }
    else if (type=="pstpm2_normal_frailty_2d") { // order is important here
      return pstpm2_model_output_<Pstpm2<NormalSharedFrailty2D<Stpm2>,SmoothLogH> >(args);
    }
    else if (type=="stpm2_clayton_copula") {
      return stpm2_model_output_<ClaytonCopula<Stpm2> >(args);
    }
    else if (type=="pstpm2_clayton_copula") {
      return pstpm2_model_output_<Pstpm2<ClaytonCopula<Stpm2>,SmoothLogH> >(args);
    }
    else {
      REprintf("Unknown model type.\n");
      return wrap(-1);
    }
  }
  double OmegaCoef_helper(int q, int m, double alpha, Rcpp::NumericMatrix &qm) {
    if (m==0) return(1.0);
    if (qm(q,m)!=0.0) return(qm(q,m));
    if (m==q-1)
      qm(q,m) = std::pow(alpha,1.0-q)*R::gammafn(q-alpha)/R::gammafn(1-alpha);
    else
      qm(q,m) = OmegaCoef_helper(q-1,m,alpha,qm) +
        OmegaCoef_helper(q-1,m-1,alpha,qm)*((q-1)/alpha-(q-m));
    return(qm(q,m));
  }
  RcppExport SEXP OmegaCoef(SEXP _q, SEXP _alpha) {
    int q = as<int>(_q);
    double alpha = as<double>(_alpha);
    Rcpp::NumericMatrix qm(q+1,q);
    Rcpp::NumericVector out(q);
    for (int i=0; i<=q; i++)
      for (int j=0; j<=(q-1); j++)
        qm(i,j)=0.0;
    for (int m=0; m<q; ++m) // (loop reconstructed)
      out[m] = OmegaCoef_helper(q,m,alpha,qm);
    return wrap(out);
  }
  // cure model (struct reconstructed from the initialiser in fitCureModel)
  struct CureModel {
    int n0, n1, n2;
    mat Xshape, Xscale, Xcure;
    vec time, status;
  };
  double fminfn_cureModel(int n, double * beta, void *ex) {
    CureModel * data = (CureModel *) ex;
    vec vbeta(beta,n);
    double ll = 0.0;
    vec shape = exp(data->Xshape * vbeta(span(0,data->n0-1)));
    vec scale = exp(data->Xscale * vbeta(span(data->n0,data->n1-1)));
    vec cure = 1.0/(1.0+exp(-data->Xcure * vbeta(span(data->n1,data->n2-1))));
    for (size_t i=0; i < data->time.size(); ++i) {
      ll += data->status(i)==1.0 ?
        log(1.0-cure(i)) + R::dweibull(data->time(i),shape(i),scale(i),1) :
        log(cure(i)+(1.0-cure(i)) * R::pweibull(data->time(i),shape(i),scale(i),0,0));
    }
    R_CheckUserInterrupt();  /* be polite -- did the user hit ctrl-C? */
    return -ll;
  }
  RcppExport SEXP fitCureModel(SEXP stime, SEXP sstatus, SEXP sXshape,
                               SEXP sXscale, SEXP sXcure, SEXP sbeta) {
    mat Xshape = as<mat>(sXshape);
    mat Xscale = as<mat>(sXscale);
    mat Xcure = as<mat>(sXcure);
    vec time = as<vec>(stime);
    vec status = as<vec>(sstatus);
    NumericVector init = as<NumericVector>(sbeta);
    int n0 = Xshape.n_cols;
    int n1 = n0 + Xscale.n_cols;
    int n2 = n1 + Xcure.n_cols;
    CureModel data = {n0,n1,n2,Xshape,Xscale,Xcure,time,status};
    NelderMead nm;
    nm.reltol = 1.0e-8;
    nm.maxit = 1000;
    nm.optim(&fminfn_cureModel, init, (void *) &data);
    for (int i = 0; i<init.size(); ++i)
      init[i] = nm.coef[i];
    // (tail reconstructed) return the fitted coefficients
    return List::create(_("coef")=wrap(nm.coef),
                        _("fail")=wrap(nm.fail));
  }

} // namespace rstpm2