Revision 272ac623c984dbf5defce76b6495c05accafe79f authored by Patrick Schratz on 17 December 2019, 04:08:28 UTC, committed by Patrick Schratz on 17 December 2019, 04:08:28 UTC
Build URL: https://circleci.com/gh/mlr-org/mlr/1264
Commit:
1 parent 9de9c6e
Raw File
generateThreshVsPerfData.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateThreshVsPerf.R
\name{generateThreshVsPerfData}
\alias{generateThreshVsPerfData}
\alias{ThreshVsPerfData}
\title{Generate threshold vs. performance(s) for 2-class classification.}
\usage{
generateThreshVsPerfData(
  obj,
  measures,
  gridsize = 100L,
  aggregate = TRUE,
  task.id = NULL
)
}
\arguments{
\item{obj}{(list of \link{Prediction} | list of \link{ResampleResult} | \link{BenchmarkResult})\cr
Single prediction object, list of them, single resample result, list of them, or a benchmark result.
If the list was produced by different learners that you want to compare,
name the list elements with the names you want to see in the plots,
e.g. learner short names or ids.}

\item{measures}{(\link{Measure} | list of \link{Measure})\cr
Performance measure(s) to evaluate.
Default is the default measure for the task, see \link{getDefaultMeasure}.}

\item{gridsize}{(\code{integer(1)})\cr
Grid resolution for x-axis (threshold).
Default is 100.}

\item{aggregate}{(\code{logical(1)})\cr
Whether to aggregate \link{ResamplePrediction}s or to plot the performance
of each iteration separately.
Default is \code{TRUE}.}

\item{task.id}{(\code{character(1)})\cr
Selected task in \link{BenchmarkResult} to do plots for, ignored otherwise.
Default is first task.}
}
\value{
(\link{ThreshVsPerfData}). A named list containing the measured performance
across the threshold grid, the measures, and whether the performance estimates were
aggregated (only applicable for (list of) \link{ResampleResult}s).
}
\description{
Generates data on threshold vs. performance(s) for 2-class classification that can be used for plotting.
}
\seealso{
Other generate_plot_data: 
\code{\link{generateCalibrationData}()},
\code{\link{generateCritDifferencesData}()},
\code{\link{generateFeatureImportanceData}()},
\code{\link{generateFilterValuesData}()},
\code{\link{generateLearningCurveData}()},
\code{\link{generatePartialDependenceData}()},
\code{\link{plotFilterValues}()}

Other thresh_vs_perf: 
\code{\link{plotROCCurves}()},
\code{\link{plotThreshVsPerf}()}
}
\concept{generate_plot_data}
\concept{thresh_vs_perf}
back to top