% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures.R
\docType{data}
\name{measures}
\alias{measures}
\alias{featperc}
\alias{timetrain}
\alias{timepredict}
\alias{timeboth}
\alias{sse}
\alias{measureSSE}
\alias{mse}
\alias{measureMSE}
\alias{rmse}
\alias{measureRMSE}
\alias{medse}
\alias{measureMEDSE}
\alias{sae}
\alias{measureSAE}
\alias{mae}
\alias{measureMAE}
\alias{medae}
\alias{measureMEDAE}
\alias{rsq}
\alias{measureRSQ}
\alias{expvar}
\alias{measureEXPVAR}
\alias{arsq}
\alias{rrse}
\alias{measureRRSE}
\alias{rae}
\alias{measureRAE}
\alias{mape}
\alias{measureMAPE}
\alias{msle}
\alias{measureMSLE}
\alias{rmsle}
\alias{measureRMSLE}
\alias{kendalltau}
\alias{measureKendallTau}
\alias{spearmanrho}
\alias{measureSpearmanRho}
\alias{mmce}
\alias{measureMMCE}
\alias{acc}
\alias{measureACC}
\alias{ber}
\alias{measureBER}
\alias{multiclass.aunu}
\alias{measureAUNU}
\alias{multiclass.aunp}
\alias{measureAUNP}
\alias{multiclass.au1u}
\alias{measureAU1U}
\alias{multiclass.au1p}
\alias{measureAU1P}
\alias{multiclass.brier}
\alias{measureMulticlassBrier}
\alias{logloss}
\alias{measureLogloss}
\alias{ssr}
\alias{measureSSR}
\alias{qsr}
\alias{measureQSR}
\alias{lsr}
\alias{measureLSR}
\alias{kappa}
\alias{measureKAPPA}
\alias{wkappa}
\alias{measureWKAPPA}
\alias{auc}
\alias{measureAUC}
\alias{brier}
\alias{measureBrier}
\alias{brier.scaled}
\alias{measureBrierScaled}
\alias{bac}
\alias{measureBAC}
\alias{tp}
\alias{measureTP}
\alias{tn}
\alias{measureTN}
\alias{fp}
\alias{measureFP}
\alias{fn}
\alias{measureFN}
\alias{tpr}
\alias{measureTPR}
\alias{tnr}
\alias{measureTNR}
\alias{fpr}
\alias{measureFPR}
\alias{fnr}
\alias{measureFNR}
\alias{ppv}
\alias{measurePPV}
\alias{npv}
\alias{measureNPV}
\alias{fdr}
\alias{measureFDR}
\alias{mcc}
\alias{measureMCC}
\alias{f1}
\alias{measureF1}
\alias{gmean}
\alias{measureGMEAN}
\alias{gpr}
\alias{measureGPR}
\alias{multilabel.hamloss}
\alias{measureMultilabelHamloss}
\alias{multilabel.subset01}
\alias{measureMultilabelSubset01}
\alias{multilabel.f1}
\alias{measureMultilabelF1}
\alias{multilabel.acc}
\alias{measureMultilabelACC}
\alias{multilabel.ppv}
\alias{measureMultilabelPPV}
\alias{multilabel.tpr}
\alias{measureMultilabelTPR}
\alias{cindex}
\alias{cindex.uno}
\alias{iauc.uno}
\alias{ibrier}
\alias{meancosts}
\alias{mcp}
\alias{db}
\alias{dunn}
\alias{G1}
\alias{G2}
\alias{silhouette}
\title{Performance measures.}
\format{none}
\usage{
featperc
timetrain
timepredict
timeboth
sse
measureSSE(truth, response)
mse
measureMSE(truth, response)
rmse
measureRMSE(truth, response)
medse
measureMEDSE(truth, response)
sae
measureSAE(truth, response)
mae
measureMAE(truth, response)
medae
measureMEDAE(truth, response)
rsq
measureRSQ(truth, response)
expvar
measureEXPVAR(truth, response)
arsq
rrse
measureRRSE(truth, response)
rae
measureRAE(truth, response)
mape
measureMAPE(truth, response)
msle
measureMSLE(truth, response)
rmsle
measureRMSLE(truth, response)
kendalltau
measureKendallTau(truth, response)
spearmanrho
measureSpearmanRho(truth, response)
mmce
measureMMCE(truth, response)
acc
measureACC(truth, response)
ber
measureBER(truth, response)
multiclass.aunu
measureAUNU(probabilities, truth)
multiclass.aunp
measureAUNP(probabilities, truth)
multiclass.au1u
measureAU1U(probabilities, truth)
multiclass.au1p
measureAU1P(probabilities, truth)
multiclass.brier
measureMulticlassBrier(probabilities, truth)
logloss
measureLogloss(probabilities, truth)
ssr
measureSSR(probabilities, truth)
qsr
measureQSR(probabilities, truth)
lsr
measureLSR(probabilities, truth)
kappa
measureKAPPA(truth, response)
wkappa
measureWKAPPA(truth, response)
auc
measureAUC(probabilities, truth, negative, positive)
brier
measureBrier(probabilities, truth, negative, positive)
brier.scaled
measureBrierScaled(probabilities, truth, negative, positive)
bac
measureBAC(truth, response)
tp
measureTP(truth, response, positive)
tn
measureTN(truth, response, negative)
fp
measureFP(truth, response, positive)
fn
measureFN(truth, response, negative)
tpr
measureTPR(truth, response, positive)
tnr
measureTNR(truth, response, negative)
fpr
measureFPR(truth, response, negative, positive)
fnr
measureFNR(truth, response, negative, positive)
ppv
measurePPV(truth, response, positive, probabilities = NULL)
npv
measureNPV(truth, response, negative)
fdr
measureFDR(truth, response, positive)
mcc
measureMCC(truth, response, negative, positive)
f1
measureF1(truth, response, positive)
gmean
measureGMEAN(truth, response, negative, positive)
gpr
measureGPR(truth, response, positive)
multilabel.hamloss
measureMultilabelHamloss(truth, response)
multilabel.subset01
measureMultilabelSubset01(truth, response)
multilabel.f1
measureMultilabelF1(truth, response)
multilabel.acc
measureMultilabelACC(truth, response)
multilabel.ppv
measureMultilabelPPV(truth, response)
multilabel.tpr
measureMultilabelTPR(truth, response)
cindex
cindex.uno
iauc.uno
ibrier
meancosts
mcp
db
dunn
G1
G2
silhouette
}
\arguments{
\item{truth}{(\link{factor})\cr
Vector of the true class.}

\item{response}{(\link{factor})\cr
Vector of the predicted class.}

\item{probabilities}{(\link{numeric} | \link{matrix})\cr
a) For purely binary classification measures: The predicted probabilities
for the positive class as a numeric vector.
b) For multiclass classification measures: The predicted probabilities for
all classes, always as a numeric matrix, where columns are named with the
class labels.}

\item{negative}{(\code{character(1)})\cr
The name of the negative class.}

\item{positive}{(\code{character(1)})\cr
The name of the positive class.}
}
\description{
A performance measure is evaluated after a single train/predict step and
returns a single number to assess the quality of the prediction (or maybe
only the model, think AIC). The measure itself knows whether it wants to be
minimized or maximized and for which tasks it is applicable.

All supported measures can be found by \link{listMeasures} or as a table in
the tutorial appendix:
\url{https://mlr.mlr-org.com/articles/tutorial/measures.html}.

If you want a measure for a misclassification cost matrix, look at
\link{makeCostMeasure}. If you want to implement your own measure, look at
\link{makeMeasure}.

Most measures can be accessed directly via the function named after the
scheme measureX (e.g. measureSSE); see the examples below.

For clustering measures, we compact the predicted cluster IDs so that they
form a continuous series starting with 1; otherwise some of the measures
will generate warnings.

Some measures have parameters. Their defaults are set in the constructor
\link{makeMeasure} and can be overwritten using \link{setMeasurePars}.
}
\references{
He, H. & Garcia, E. A. (2009)
\emph{Learning from Imbalanced Data.}
IEEE Transactions on Knowledge and Data Engineering, vol. 21, no. 9,
pp. 1263-1284.

H. Uno et al.
\emph{On the C-statistics for Evaluating Overall Adequacy of Risk Prediction
Procedures with Censored Survival Data.}
Statistics in Medicine. 2011;30(10):1105-1117.
\url{https://doi.org/10.1002/sim.4154}.

H. Uno et al.
\emph{Evaluating Prediction Rules for T-Year Survivors with Censored
Regression Models.}
Journal of the American Statistical Association 102, no. 478 (2007):
527-537.
\url{https://www.jstor.org/stable/27639883}.
}
\seealso{
Other performance:
\code{\link{ConfusionMatrix}},
\code{\link{calculateConfusionMatrix}()},
\code{\link{calculateROCMeasures}()},
\code{\link{estimateRelativeOverfitting}()},
\code{\link{makeCostMeasure}()},
\code{\link{makeCustomResampledMeasure}()},
\code{\link{makeMeasure}()},
\code{\link{performance}()},
\code{\link{setAggregation}()},
\code{\link{setMeasurePars}()}
}
\concept{performance}
\keyword{datasets}
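\examples{
# A minimal sketch of calling the measure functions directly, using the
# measureX scheme described above. The truth/response vectors here are
# made-up toy values for illustration only, not data from a real task.

# Regression: sum of squared errors and mean absolute error.
truth = c(1.5, 2.0, 3.0)
response = c(1.4, 2.2, 2.7)
measureSSE(truth, response)
measureMAE(truth, response)

# Classification: accuracy and mean misclassification error.
truth = factor(c("a", "b", "a", "b"))
response = factor(c("a", "a", "a", "b"))
measureACC(truth, response)
measureMMCE(truth, response)
}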