mlpack  3.0.4
Class Hierarchy

Go to the graphical class hierarchy

This inheritance list is sorted roughly, but not completely, alphabetically:
[detail level 123]
 CAdaBoost< mlpack::decision_stump::DecisionStump<> >
 CAdaBoost< mlpack::perceptron::Perceptron<> >
 CAddDecomposableEvaluate< FunctionType >
 CAddDecomposableEvaluateConst< FunctionType >
 CAddDecomposableEvaluateStatic< FunctionType >
 CAddDecomposableEvaluateWithGradientConst< FunctionType >
 CAddDecomposableEvaluateWithGradientStatic< FunctionType >
 CAddDecomposableGradient< FunctionType >
 CAddDecomposableGradientConst< FunctionType >
 CAddDecomposableGradientStatic< FunctionType >
 CAddEvaluate< FunctionType >
 CAddEvaluateConst< FunctionType >
 CAddEvaluateStatic< FunctionType >
 CAddEvaluateWithGradient< FunctionType >
 CAddEvaluateWithGradientConst< FunctionType >
 CAddEvaluateWithGradientStatic< FunctionType >
 CAddGradient< FunctionType >
 CAddGradientConst< FunctionType >
 CAddGradientStatic< FunctionType >
 Cversion< mlpack::ann::FFN< OutputLayerType, InitializationRuleType, CustomLayer... > >
 Cversion< mlpack::ann::RNN< OutputLayerType, InitializationRuleType, CustomLayer... > >
 Cstatic_visitor
 Ctemplate AuxiliarySplitInfo< ElemType >
 CFastMKS< mlpack::kernel::CosineDistance >
 CFastMKS< mlpack::kernel::EpanechnikovKernel >
 CFastMKS< mlpack::kernel::GaussianKernel >
 CFastMKS< mlpack::kernel::HyperbolicTangentKernel >
 CFastMKS< mlpack::kernel::LinearKernel >
 CFastMKS< mlpack::kernel::PolynomialKernel >
 CFastMKS< mlpack::kernel::TriangularKernel >
 CFunctionType
 CHMM< distribution::RegressionDistribution >
 CHMM< mlpack::distribution::DiscreteDistribution >
 CHMM< mlpack::distribution::GaussianDistribution >
 CHMM< mlpack::gmm::GMM >
 CHRectBound< metric::EuclideanDistance, ElemType >
 CHRectBound< MetricType >
 CHRectBound< mlpack::metric::LMetric, ElemType >
 CInitHMMModel
 CIPMetric< mlpack::kernel::CosineDistance >
 CIPMetric< mlpack::kernel::EpanechnikovKernel >
 CIPMetric< mlpack::kernel::GaussianKernel >
 CIPMetric< mlpack::kernel::HyperbolicTangentKernel >
 CIPMetric< mlpack::kernel::LinearKernel >
 CIPMetric< mlpack::kernel::PolynomialKernel >
 CIPMetric< mlpack::kernel::TriangularKernel >
 CIsVector< VecType >If value == true, then VecType is some sort of Armadillo vector or subview
 CIsVector< arma::Col< eT > >
 CIsVector< arma::Row< eT > >
 CIsVector< arma::SpCol< eT > >
 CIsVector< arma::SpRow< eT > >
 CIsVector< arma::SpSubview< eT > >
 CIsVector< arma::subview_col< eT > >
 CIsVector< arma::subview_row< eT > >
 CLRSDP< mlpack::optimization::SDP< arma::sp_mat > >
 CLRSDPFunction< mlpack::optimization::SDP< arma::sp_mat > >
 CAdaBoost< WeakLearnerType, MatType >The AdaBoost class
 CAdaBoostModelThe model to save to disk
 CAMF< TerminationPolicyType, InitializationRuleType, UpdateRuleType >This class implements AMF (alternating matrix factorization) on the given matrix V
 CAverageInitializationThis initialization rule initializes matrix W and H to root of the average of V, perturbed with uniform noise
 CCompleteIncrementalTermination< TerminationPolicy >This class acts as a wrapper for basic termination policies to be used by SVDCompleteIncrementalLearning
 CGivenInitializationThis initialization rule for AMF simply fills the W and H matrices with the matrices given to the constructor of this object
 CIncompleteIncrementalTermination< TerminationPolicy >This class acts as a wrapper for basic termination policies to be used by SVDIncompleteIncrementalLearning
 CMaxIterationTerminationThis termination policy only terminates when the maximum number of iterations has been reached
 CNMFALSUpdateThis class implements a method titled 'Alternating Least Squares' described in the following paper:
 CNMFMultiplicativeDistanceUpdateThe multiplicative distance update rules for matrices W and H
 CNMFMultiplicativeDivergenceUpdateThis follows a method described in the paper 'Algorithms for Non-negative Matrix Factorization'
 CRandomAcolInitialization< columnsToAverage >This class initializes the W matrix of the AMF algorithm by averaging p randomly chosen columns of V
 CRandomInitializationThis initialization rule for AMF simply fills the W and H matrices with uniform random noise in [0, 1]
 CSimpleResidueTerminationThis class implements a simple residue-based termination policy
 CSimpleToleranceTermination< MatType >This class implements residue tolerance termination policy
 CSVDBatchLearningThis class implements SVD batch learning with momentum
 CSVDCompleteIncrementalLearning< MatType >This class computes SVD using complete incremental batch learning, as described in the following paper:
 CSVDCompleteIncrementalLearning< arma::sp_mat >TODO : Merge this template specialized function for sparse matrix using common row_col_iterator
 CSVDIncompleteIncrementalLearningThis class computes SVD using incomplete incremental batch learning, as described in the following paper:
 CValidationRMSETermination< MatType >This class implements validation termination policy based on RMSE index
 CAdd< InputDataType, OutputDataType >Implementation of the Add module class
 CAddMerge< InputDataType, OutputDataType, CustomLayers >Implementation of the AddMerge module class
 CAlphaDropout< InputDataType, OutputDataType >The alpha - dropout layer is a regularizer that randomly with probability 'ratio' sets input values to alphaDash
 CAtrousConvolution< ForwardConvolutionRule, BackwardConvolutionRule, GradientConvolutionRule, InputDataType, OutputDataType >Implementation of the Atrous Convolution class
 CAddTaskGenerator of instances of the binary addition task
 CCopyTaskGenerator of instances of the binary sequence copy task
 CSortTaskGenerator of instances of the sequence sort task
 CBaseLayer< ActivationFunction, InputDataType, OutputDataType >Implementation of the base layer
 CBatchNorm< InputDataType, OutputDataType >Declaration of the Batch Normalization layer class
 CBilinearInterpolation< InputDataType, OutputDataType >Definition and Implementation of the Bilinear Interpolation Layer
 CConcat< InputDataType, OutputDataType, CustomLayers >Implementation of the Concat class
 CConcatPerformance< OutputLayerType, InputDataType, OutputDataType >Implementation of the concat performance class
 CConstant< InputDataType, OutputDataType >Implementation of the constant layer
 CConstInitializationThis class is used to initialize weight matrix with constant values
 CConvolution< ForwardConvolutionRule, BackwardConvolutionRule, GradientConvolutionRule, InputDataType, OutputDataType >Implementation of the Convolution class
 CCrossEntropyError< InputDataType, OutputDataType >The cross-entropy performance function measures the network's performance according to the cross-entropy between the input and target distributions
 CDropConnect< InputDataType, OutputDataType >The DropConnect layer is a regularizer that randomly with probability ratio sets the connection values to zero and scales the remaining elements by factor 1 /(1 - ratio)
 CDropout< InputDataType, OutputDataType >The dropout layer is a regularizer that randomly with probability 'ratio' sets input values to zero and scales the remaining elements by factor 1 / (1 - ratio) rather than during test time so as to keep the expected sum the same
 CELU< InputDataType, OutputDataType >The ELU activation function, defined by
 CFastLSTM< InputDataType, OutputDataType >An implementation of a faster version of the Fast LSTM network layer
 CFFN< OutputLayerType, InitializationRuleType, CustomLayers >Implementation of a standard feed forward network
 CFFTConvolution< BorderMode, padLastDim >Computes the two-dimensional convolution through fft
 CFlexibleReLU< InputDataType, OutputDataType >The FlexibleReLU activation function, defined by
 CFullConvolution
 CGaussianInitializationThis class is used to initialize weight matrix with a gaussian
 CGlimpse< InputDataType, OutputDataType >The glimpse layer returns a retina-like representation (down-scaled cropped images) of increasing scale around a given location in a given image
 CGlorotInitializationType< Uniform >This class is used to initialize the weight matrix with the Glorot Initialization method
 CGRU< InputDataType, OutputDataType >An implementation of a GRU network layer
 CHardTanH< InputDataType, OutputDataType >The Hard Tanh activation function, defined by
 CHeInitializationThis class is used to initialize weight matrix with the He initialization rule given by He et
 CIdentityFunctionThe identity function, defined by
 CInitTraits< InitRuleType >This is a template class that can provide information about various initialization methods
 CInitTraits< KathirvalavakumarSubavathiInitialization >Initialization traits of the Kathirvalavakumar Subavathi initialization rule
 CInitTraits< NguyenWidrowInitialization >Initialization traits of the Nguyen-Widrow initialization rule
 CJoin< InputDataType, OutputDataType >Implementation of the Join module class
 CKathirvalavakumarSubavathiInitializationThis class is used to initialize the weight matrix with the method proposed by T
 CKLDivergence< InputDataType, OutputDataType >The Kullback–Leibler divergence is often used for continuous distributions (direct regression)
 CLayerNorm< InputDataType, OutputDataType >Declaration of the Layer Normalization class
 CLayerTraits< LayerType >This is a template class that can provide information about various layers
 CLeakyReLU< InputDataType, OutputDataType >The LeakyReLU activation function, defined by
 CLecunNormalInitializationThis class is used to initialize weight matrix with the Lecun Normalization initialization rule
 CLinear< InputDataType, OutputDataType >Implementation of the Linear layer class
 CLinearNoBias< InputDataType, OutputDataType >Implementation of the LinearNoBias class
 CLogisticFunctionThe logistic function, defined by
 CLogSoftMax< InputDataType, OutputDataType >Implementation of the log softmax layer
 CLookup< InputDataType, OutputDataType >Implementation of the Lookup class
 CLSTM< InputDataType, OutputDataType >An implementation of an LSTM network layer
 CMaxPooling< InputDataType, OutputDataType >Implementation of the MaxPooling layer
 CMaxPoolingRule
 CMeanPooling< InputDataType, OutputDataType >Implementation of the MeanPooling
 CMeanPoolingRule
 CMeanSquaredError< InputDataType, OutputDataType >The mean squared error performance function measures the network's performance according to the mean of squared errors
 CMultiplyConstant< InputDataType, OutputDataType >Implementation of the multiply constant layer
 CMultiplyMerge< InputDataType, OutputDataType, CustomLayers >Implementation of the MultiplyMerge module class
 CNaiveConvolution< BorderMode >Computes the two-dimensional convolution
 CNegativeLogLikelihood< InputDataType, OutputDataType >Implementation of the negative log likelihood layer
 CNetworkInitialization< InitializationRuleType, CustomLayers >This class is used to initialize the network with the given initialization rule
 CNguyenWidrowInitializationThis class is used to initialize the weight matrix with the Nguyen-Widrow method
 COivsInitialization< ActivationFunction >This class is used to initialize the weight matrix with the oivs method
 COrthogonalInitializationThis class is used to initialize the weight matrix with the orthogonal matrix initialization
 CPReLU< InputDataType, OutputDataType >The PReLU activation function, defined by (where alpha is trainable)
 CRandomInitializationThis class is used to initialize randomly the weight matrix
 CRectifierFunctionThe rectifier function, defined by
 CRecurrent< InputDataType, OutputDataType, CustomLayers >Implementation of the RecurrentLayer class
 CRecurrentAttention< InputDataType, OutputDataType >This class implements the Recurrent Model for Visual Attention, using a variety of possible layer implementations
 CReinforceNormal< InputDataType, OutputDataType >Implementation of the reinforce normal layer
 CRNN< OutputLayerType, InitializationRuleType, CustomLayers >Implementation of a standard recurrent neural network container
 CSelect< InputDataType, OutputDataType >The select module selects the specified column from a given input matrix
 CSequential< InputDataType, OutputDataType, CustomLayers >Implementation of the Sequential class
 CSigmoidCrossEntropyError< InputDataType, OutputDataType >The SigmoidCrossEntropyError performance function measures the network's performance according to the cross-entropy function between the input and target distributions
 CSoftplusFunctionThe softplus function, defined by
 CSoftsignFunctionThe softsign function, defined by
 CSVDConvolution< BorderMode >Computes the two-dimensional convolution using singular value decomposition
 CSwishFunctionThe swish function, defined by
 CTanhFunctionThe tanh function, defined by
 CTransposedConvolution< ForwardConvolutionRule, BackwardConvolutionRule, GradientConvolutionRule, InputDataType, OutputDataType >Implementation of the Transposed Convolution class
 CValidConvolution
 CVRClassReward< InputDataType, OutputDataType >Implementation of the variance reduced classification reinforcement layer
 CBacktraceProvides a backtrace
 CCLIOption< N >A static object whose constructor registers a parameter with the CLI class
 CParameterType< T >Utility struct to return the type that boost::program_options should accept for a given input type
 CParameterType< arma::Col< eT > >For vector types, boost::program_options will accept a std::string, not an arma::Col<eT> (since it is not clear how to specify a vector on the command-line)
 CParameterType< arma::Mat< eT > >For matrix types, boost::program_options will accept a std::string, not an arma::mat (since it is not clear how to specify a matrix on the command-line)
 CParameterType< arma::Row< eT > >For row vector types, boost::program_options will accept a std::string, not an arma::Row<eT> (since it is not clear how to specify a vector on the command-line)
 CParameterType< std::tuple< mlpack::data::DatasetMapper< PolicyType, std::string >, arma::Mat< eT > > >For matrix+dataset info types, we should accept a std::string
 CParameterTypeDeducer< HasSerialize, T >
 CParameterTypeDeducer< true, T >
 CProgramDocA static object whose constructor registers program documentation with the CLI class
 CPyOption< T >The Python option class
 CProgramDocA static object whose constructor registers program documentation with the CLI class
 CTestOption< N >A static object whose constructor registers a parameter with the CLI class
 CBallBound< MetricType, VecType >Ball bound encloses a set of points at a specific distance (radius) from a specific point (center)
 CBoundTraits< BoundType >A class to obtain compile-time traits about BoundType classes
 CBoundTraits< BallBound< MetricType, VecType > >A specialization of BoundTraits for this bound type
 CBoundTraits< CellBound< MetricType, ElemType > >
 CBoundTraits< HollowBallBound< MetricType, ElemType > >A specialization of BoundTraits for this bound type
 CBoundTraits< HRectBound< MetricType, ElemType > >
 CCellBound< MetricType, ElemType >The CellBound class describes a bound that consists of a number of hyperrectangles
 CHollowBallBound< TMetricType, ElemType >Hollow ball bound encloses a set of points at a specific distance (radius) from a specific point (center) except points at a specific distance from another point (the center of the hole)
 CHRectBound< MetricType, ElemType >Hyper-rectangle bound for an L-metric
 CIsLMetric< MetricType >Utility struct where Value is true if and only if the argument is of type LMetric
 CIsLMetric< metric::LMetric< Power, TakeRoot > >Specialization for IsLMetric when the argument is of type LMetric
 CBatchSVDPolicyImplementation of the Batch SVD policy to act as a wrapper when accessing Batch SVD from within CFType
 CCFTypeThis class implements Collaborative Filtering (CF)
 CDummyClassThis class acts as a dummy class for passing as template parameter
 CNMFPolicyImplementation of the NMF policy to act as a wrapper when accessing NMF from within CFType
 CRandomizedSVDPolicyImplementation of the Randomized SVD policy to act as a wrapper when accessing Randomized SVD from within CFType
 CRegSVDPolicyImplementation of the Regularized SVD policy to act as a wrapper when accessing Regularized SVD from within CFType
 CSVDCompletePolicyImplementation of the SVD complete incremental policy to act as a wrapper when accessing SVD complete decomposition from within CFType
 CSVDIncompletePolicyImplementation of the SVD incomplete incremental to act as a wrapper when accessing SVD incomplete incremental from within CFType
 CSVDWrapper< Factorizer >This class acts as the wrapper for all SVD factorizers which are incompatible with CF module
 CCLIParses the command line for parameters and holds user-specified parameters
 CAccuracyThe Accuracy is a metric of performance for classification algorithms that is equal to a proportion of correctly labeled test items among all ones for given test items
 CCVBase< MLAlgorithm, MatType, PredictionsType, WeightsType >An auxiliary class for cross-validation
 CF1< AS, PositiveClass >F1 is a metric of performance for classification algorithms that for binary classification is equal to $ 2 * precision * recall / (precision + recall) $
 CKFoldCV< MLAlgorithm, Metric, MatType, PredictionsType, WeightsType >The class KFoldCV implements k-fold cross-validation for regression and classification algorithms
 CMetaInfoExtractor< MLAlgorithm, MT, PT, WT >MetaInfoExtractor is a tool for extracting meta information about a given machine learning algorithm
 CMSEThe MeanSquaredError is a metric of performance for regression algorithms that is equal to the mean squared error between predicted values and ground truth (correct) values for given test items
 CNotFoundMethodForm
 CPrecision< AS, PositiveClass >Precision is a metric of performance for classification algorithms that for binary classification is equal to $ tp / (tp + fp) $, where $ tp $ and $ fp $ are the numbers of true positives and false positives respectively
 CRecall< AS, PositiveClass >Recall is a metric of performance for classification algorithms that for binary classification is equal to $ tp / (tp + fn) $, where $ tp $ and $ fn $ are the numbers of true positives and false negatives respectively
 CSelectMethodForm< MLAlgorithm, HMFs >A type function that selects a right method form
 CSelectMethodForm< MLAlgorithm >
 CSelectMethodForm< MLAlgorithm >::From< Forms >
 CSelectMethodForm< MLAlgorithm, HasMethodForm, HMFs... >
 CSelectMethodForm< MLAlgorithm, HasMethodForm, HMFs... >::From< Forms >
 CSimpleCV< MLAlgorithm, Metric, MatType, PredictionsType, WeightsType >SimpleCV splits data into two sets - training and validation sets - and then runs training on the training set and evaluates performance on the validation set
 CTrainForm< MatType, PredictionsType, WeightsType, DatasetInfo, NumClasses >A wrapper struct for holding a Train form
 CTrainFormBase< PT, WT, SignatureParams >
 CCustomImputation< T >A simple custom imputation class
 CDatasetMapper< PolicyType, InputType >Auxiliary information for a dataset, including mappings to/from strings (or other types) and the datatype of each dimension
 CHasSerialize< T >
 CHasSerialize< T >::check< U, V, W >
 CHasSerializeFunction< T >
 CImputer< T, MapperType, StrategyType >Given a dataset of a particular datatype, replace user-specified missing value with a variable dependent on the StrategyType and MapperType
 CIncrementPolicyIncrementPolicy is used as a helper class for DatasetMapper
 CListwiseDeletion< T >A complete-case analysis to remove the values containing mappedValue
 CLoadCSVLoad the csv file.This class use boost::spirit to implement the parser, please refer to following link http://theboostcpplibraries.com/boost.spirit for quick review
 CMeanImputation< T >A simple mean imputation class
 CMedianImputation< T >This is a class implementation of simple median imputation
 CMissingPolicyMissingPolicy is used as a helper class for DatasetMapper
 CDBSCAN< RangeSearchType, PointSelectionPolicy >DBSCAN (Density-Based Spatial Clustering of Applications with Noise) is a clustering technique described in the following paper:
 CRandomPointSelectionThis class can be used to randomly select the next point to use for DBSCAN
 CDecisionStump< MatType >This class implements a decision stump
 CDTree< MatType, TagType >A density estimation tree is similar to both a decision tree and a space partitioning tree (like a kd-tree)
 CPathCacherThis class is responsible for caching the path to each node of the tree
 CDiscreteDistributionA discrete distribution where the only observations are discrete observations
 CGammaDistributionThis class represents the Gamma distribution
 CGaussianDistributionA single multivariate Gaussian distribution
 CLaplaceDistributionThe multivariate Laplace distribution centered at 0 has pdf
 CRegressionDistributionA class that represents a univariate conditionally Gaussian distribution
 CDTBRules< MetricType, TreeType >
 CDTBStatA statistic for use with mlpack trees, which stores the upper bound on distance to nearest neighbors and the component which this node belongs to
 CDualTreeBoruvka< MetricType, MatType, TreeType >Performs the MST calculation using the Dual-Tree Boruvka algorithm, using any type of tree
 CEdgePairAn edge pair is simply two indices and a distance
 CUnionFindA Union-Find data structure
 CFastMKS< KernelType, MatType, TreeType >An implementation of fast exact max-kernel search
 CFastMKSModelA utility struct to contain all the possible FastMKS models, for use by the mlpack_fastmks program
 CFastMKSRules< KernelType, TreeType >The FastMKSRules class is a template helper class used by FastMKS class when performing exact max-kernel search
 CFastMKSStatThe statistic used in trees with FastMKS
 CDiagonalConstraintForce a covariance matrix to be diagonal
 CEigenvalueRatioConstraintGiven a vector of eigenvalue ratios, ensure that the covariance matrix always has those eigenvalue ratios
 CEMFit< InitialClusteringType, CovarianceConstraintPolicy >This class contains methods which can fit a GMM to observations using the EM algorithm
 CGMMA Gaussian Mixture Model (GMM)
 CNoConstraintThis class enforces no constraint on the covariance matrix
 CPositiveDefiniteConstraintGiven a covariance matrix, force the matrix to be positive definite
 CHMM< Distribution >A class that represents a Hidden Markov Model with an arbitrary type of emission distribution
 CHMMModelA serializable HMM model that also stores the type
 CCVFunction< CVType, MLAlgorithm, TotalArgs, BoundArgs >This wrapper serves for adapting the interface of the cross-validation classes to the one that can be utilized by the mlpack optimizers
 CDeduceHyperParameterTypes< Args >A type function for deducing types of hyper-parameters from types of arguments in the Optimize method in HyperParameterTuner
 CDeduceHyperParameterTypes< Args >::ResultHolder< HPTypes >
 CDeduceHyperParameterTypes< PreFixedArg< T >, Args... >Defining DeduceHyperParameterTypes for the case when not all argument types have been processed, and the next one is the type of an argument that should be fixed
 CDeduceHyperParameterTypes< PreFixedArg< T >, Args... >::ResultHolder< HPTypes >
 CDeduceHyperParameterTypes< T, Args... >Defining DeduceHyperParameterTypes for the case when not all argument types have been processed, and the next one (T) is a collection type or an arithmetic type
 CDeduceHyperParameterTypes< T, Args... >::IsCollectionType< Type >A type function to check whether Type is a collection type (for that it should define value_type)
 CDeduceHyperParameterTypes< T, Args... >::ResultHolder< HPTypes >
 CDeduceHyperParameterTypes< T, Args... >::ResultHPType< ArgumentType, IsArithmetic >A type function to deduce the result hyper-parameter type for ArgumentType
 CDeduceHyperParameterTypes< T, Args... >::ResultHPType< ArithmeticType, true >
 CDeduceHyperParameterTypes< T, Args... >::ResultHPType< CollectionType, false >
 CFixedArg< T, I >A struct for storing information about a fixed argument
 CHyperParameterTuner< MLAlgorithm, Metric, CV, OptimizerType, MatType, PredictionsType, WeightsType >The class HyperParameterTuner for the given MLAlgorithm utilizes the provided Optimizer to find the values of hyper-parameters that optimize the value of the given Metric
 CIsPreFixedArg< T >A type function for checking whether the given type is PreFixedArg
 CPreFixedArg< T >A struct for marking arguments as ones that should be fixed (it can be useful for the Optimize method of HyperParameterTuner)
 CPreFixedArg< T & >The specialization of the template for references
 CCosineDistanceThe cosine distance (or cosine similarity)
 CEpanechnikovKernelThe Epanechnikov kernel, defined as
 CExampleKernelAn example kernel function
 CGaussianKernelThe standard Gaussian kernel
 CHyperbolicTangentKernelHyperbolic tangent kernel
 CKernelTraits< KernelType >This is a template class that can provide information about various kernels
 CKernelTraits< CosineDistance >Kernel traits for the cosine distance
 CKernelTraits< EpanechnikovKernel >Kernel traits for the Epanechnikov kernel
 CKernelTraits< GaussianKernel >Kernel traits for the Gaussian kernel
 CKernelTraits< LaplacianKernel >Kernel traits of the Laplacian kernel
 CKernelTraits< SphericalKernel >Kernel traits for the spherical kernel
 CKernelTraits< TriangularKernel >Kernel traits for the triangular kernel
 CKMeansSelection< ClusteringType, maxIterations >Implementation of the kmeans sampling scheme
 CLaplacianKernelThe standard Laplacian kernel
 CLinearKernelThe simple linear kernel (dot product)
 CNystroemMethod< KernelType, PointSelectionPolicy >
 COrderedSelection
 CPolynomialKernelThe simple polynomial kernel
 CPSpectrumStringKernelThe p-spectrum string kernel
 CRandomSelection
 CSphericalKernelThe spherical kernel, which is 1 when the distance between the two argument points is less than or equal to the bandwidth, or 0 otherwise
 CTriangularKernelThe trivially simple triangular kernel, defined by
 CAllowEmptyClustersPolicy which allows K-Means to create empty clusters without any error being reported
 CDualTreeKMeans< MetricType, MatType, TreeType >An algorithm for an exact Lloyd iteration which simply uses dual-tree nearest-neighbor search to find the nearest centroid for each point in the dataset
 CDualTreeKMeansRules< MetricType, TreeType >
 CElkanKMeans< MetricType, MatType >
 CHamerlyKMeans< MetricType, MatType >
 CKillEmptyClustersPolicy which allows K-Means to "kill" empty clusters without any error being reported
 CKMeans< MetricType, InitialPartitionPolicy, EmptyClusterPolicy, LloydStepType, MatType >This class implements K-Means clustering, using a variety of possible implementations of Lloyd's algorithm
 CMaxVarianceNewClusterWhen an empty cluster is detected, this class takes the point furthest from the centroid of the cluster with maximum variance as a new cluster
 CNaiveKMeans< MetricType, MatType >This is an implementation of a single iteration of Lloyd's algorithm for k-means
 CPellegMooreKMeans< MetricType, MatType >An implementation of Pelleg-Moore's 'blacklist' algorithm for k-means clustering
 CPellegMooreKMeansRules< MetricType, TreeType >The rules class for the single-tree Pelleg-Moore kd-tree traversal for k-means clustering
 CPellegMooreKMeansStatisticA statistic for trees which holds the blacklist for Pelleg-Moore k-means clustering (which represents the clusters that cannot possibly own any points in a node)
 CRandomPartitionA very simple partitioner which partitions the data randomly into the number of desired clusters
 CRefinedStartA refined approach for choosing initial points for k-means clustering
 CSampleInitialization
 CKernelPCA< KernelType, KernelRule >This class performs kernel principal components analysis (Kernel PCA), for a given kernel
 CNaiveKernelRule< KernelType >
 CNystroemKernelRule< KernelType, PointSelectionPolicy >
 CLocalCoordinateCodingAn implementation of Local Coordinate Coding (LCC) that codes data which approximately lives on a manifold using a variation of l1-norm regularized sparse coding; in LCC, the penalty on the absolute value of each point's coefficient for each atom is weighted by the squared distance of that point to that atom
 CLogProvides a convenient way to give formatted output
 CColumnsToBlocksTransform the columns of the given matrix into a block format
 CRangeType< T >Simple real-valued range
 CMatrixCompletionThis class implements the popular nuclear norm minimization heuristic for matrix completion problems
 CMeanShift< UseKernel, KernelType, MatType >This class implements mean shift clustering
 CIPMetric< KernelType >The inner product metric, IPMetric, takes a given Mercer kernel (KernelType), and when Evaluate() is called, returns the distance between the two points in kernel space:
 CLMetric< TPower, TTakeRoot >The L_p metric for arbitrary integer p, with an option to take the root
 CMahalanobisDistance< TakeRoot >The Mahalanobis distance, which is essentially a stretched Euclidean distance
 CMVUMeant to provide a good abstraction for users
 CNaiveBayesClassifier< ModelMatType >The simple Naive Bayes classifier
 CNCA< MetricType, OptimizerType >An implementation of Neighborhood Components Analysis, both a linear dimensionality reduction technique and a distance learning technique
 CSoftmaxErrorFunction< MetricType >The "softmax" stochastic neighbor assignment probability function
 CDrusillaSelect< MatType >
 CFurthestNeighborSortThis class implements the necessary methods for the SortPolicy template parameter of the NeighborSearch class
 CLSHSearch< SortPolicy >The LSHSearch class; this class builds a hash on the reference set and uses this hash to compute the distance-approximate nearest-neighbors of the given queries
 CNearestNeighborSortThis class implements the necessary methods for the SortPolicy template parameter of the NeighborSearch class
 CNeighborSearch< SortPolicy, MetricType, MatType, TreeType, DualTreeTraversalType, SingleTreeTraversalType >The NeighborSearch class is a template class for performing distance-based neighbor searches
 CNeighborSearchRules< SortPolicy, MetricType, TreeType >The NeighborSearchRules class is a template helper class used by NeighborSearch class when performing distance-based neighbor searches
 CNeighborSearchRules< SortPolicy, MetricType, TreeType >::CandidateCmpCompare two candidates based on the distance
 CNeighborSearchStat< SortPolicy >Extra data for each node in the tree
 CNSModel< SortPolicy >The NSModel class provides an easy way to serialize a model, abstracts away the different types of trees, and also reflects the NeighborSearch API
 CQDAFN< MatType >
 CRAModel< SortPolicy >The RAModel class provides an abstraction for the RASearch class, abstracting away the TreeType parameter and allowing it to be specified at runtime in this class
 CRAQueryStat< SortPolicy >Extra data for each node in the tree
 CRASearch< SortPolicy, MetricType, MatType, TreeType >The RASearch class: This class provides a generic manner to perform rank-approximate search via random-sampling
 CRASearchRules< SortPolicy, MetricType, TreeType >The RASearchRules class is a template helper class used by RASearch class when performing rank-approximate search via random-sampling
 CRAUtil
 CSparseAutoencoderA sparse autoencoder is a neural network whose aim to learn compressed representations of the data, typically for dimensionality reduction, with a constraint on the activity of the neurons in the network
 CSparseAutoencoderFunctionThis is a class for the sparse autoencoder objective function
 CAdaDeltaAdaDelta is an optimizer that uses two ideas to improve upon the two main drawbacks of the Adagrad method:
 CAdaDeltaUpdateImplementation of the AdaDelta update policy
 CAdaGradAdaGrad is a modified version of stochastic gradient descent which performs larger updates for more sparse parameters and smaller updates for less sparse parameters
 CAdaGradUpdateImplementation of the AdaGrad update policy
 CAdaMaxUpdateAdaMax is a variant of Adam, an optimizer that computes individual adaptive learning rates for different parameters from estimates of first and second moments of the gradients, based on the infinity norm as given in the section 7 of the following paper
 CAdamType< UpdateRule >Adam is an optimizer that computes individual adaptive learning rates for different parameters from estimates of first and second moments of the gradients
 CAdamUpdateAdam is an optimizer that computes individual adaptive learning rates for different parameters from estimates of first and second moments of the gradients as given in the section 7 of the following paper
 CAdaptiveStepsizeDefinition of the adaptive stepsize technique, a non-monotonic stepsize scheme that uses curvature estimates to propose new stepsize choices
 CAddDecomposableEvaluate< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableEvaluate >The AddDecomposableEvaluate mixin class will add a decomposable Evaluate() method if a decomposable EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableEvaluate< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddDecomposableEvaluate< FunctionType, true, false >If we have a decomposable EvaluateWithGradient() but not a decomposable Evaluate(), add a decomposable Evaluate() method
 CAddDecomposableEvaluateConst< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableEvaluate >The AddDecomposableEvaluateConst mixin class will add a decomposable const Evaluate() method if a decomposable const EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableEvaluateConst< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddDecomposableEvaluateConst< FunctionType, true, false >If we have a decomposable const EvaluateWithGradient() but not a decomposable const Evaluate(), add a decomposable const Evaluate() method
 CAddDecomposableEvaluateStatic< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableEvaluate >The AddDecomposableEvaluateStatic mixin class will add a decomposable static Evaluate() method if a decomposable static EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableEvaluateStatic< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddDecomposableEvaluateStatic< FunctionType, true, false >If we have a decomposable EvaluateWithGradient() but not a decomposable Evaluate(), add a decomposable Evaluate() method
 CAddDecomposableEvaluateWithGradient< FunctionType, HasDecomposableEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddDecomposableEvaluateWithGradient< FunctionType, true, false >If we have a both decomposable Evaluate() and a decomposable Gradient() but not a decomposable EvaluateWithGradient(), add a decomposable EvaluateWithGradient() method
 CAddDecomposableEvaluateWithGradientConst< FunctionType, HasDecomposableEvaluateGradient, HasDecomposableEvaluateWithGradient >The AddDecomposableEvaluateWithGradientConst mixin class will add a decomposable const EvaluateWithGradient() method if both a decomposable const Evaluate() and a decomposable const Gradient() function exist, or nothing otherwise
 CAddDecomposableEvaluateWithGradientConst< FunctionType, HasDecomposableEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddDecomposableEvaluateWithGradientConst< FunctionType, true, false >If we have both a decomposable const Evaluate() and a decomposable const Gradient() but not a decomposable const EvaluateWithGradient(), add a decomposable const EvaluateWithGradient() method
 CAddDecomposableEvaluateWithGradientStatic< FunctionType, HasDecomposableEvaluateGradient, HasDecomposableEvaluateWithGradient >The AddDecomposableEvaluateWithGradientStatic mixin class will add a decomposable static EvaluateWithGradient() method if both a decomposable static Evaluate() and a decomposable static Gradient() function exist, or nothing otherwise
 CAddDecomposableEvaluateWithGradientStatic< FunctionType, HasDecomposableEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddDecomposableEvaluateWithGradientStatic< FunctionType, true, false >If we have a decomposable static Evaluate() and a decomposable static Gradient() but not a decomposable static EvaluateWithGradient(), add a decomposable static EvaluateWithGradient() method
 CAddDecomposableGradient< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableGradient >The AddDecomposableGradient mixin class will add a decomposable Gradient() method if a decomposable EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableGradient< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddDecomposableGradient< FunctionType, true, false >If we have a decomposable EvaluateWithGradient() but not a decomposable Gradient(), add a decomposable Gradient() method
 CAddDecomposableGradientConst< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableGradient >The AddDecomposableGradientConst mixin class will add a decomposable const Gradient() method if a decomposable const EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableGradientConst< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddDecomposableGradientConst< FunctionType, true, false >If we have a decomposable const EvaluateWithGradient() but not a decomposable const Gradient(), add a decomposable const Gradient() method
 CAddDecomposableGradientStatic< FunctionType, HasDecomposableEvaluateWithGradient, HasDecomposableGradient >The AddDecomposableGradientStatic mixin class will add a decomposable static Gradient() method if a decomposable static EvaluateWithGradient() function exists, or nothing otherwise
 CAddDecomposableGradientStatic< FunctionType, HasDecomposableEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddDecomposableGradientStatic< FunctionType, true, false >If we have a decomposable EvaluateWithGradient() but not a decomposable Gradient(), add a decomposable Gradient() method
 CAddEvaluate< FunctionType, HasEvaluateWithGradient, HasEvaluate >The AddEvaluate mixin class will provide an Evaluate() method if the given FunctionType has EvaluateWithGradient(), or nothing otherwise
 CAddEvaluate< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddEvaluate< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Evaluate(), add an Evaluate() method
 CAddEvaluateConst< FunctionType, HasEvaluateWithGradient, HasEvaluate >The AddEvaluateConst mixin class will provide a const Evaluate() method if the given FunctionType has EvaluateWithGradient() const, or nothing otherwise
 CAddEvaluateConst< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddEvaluateConst< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Evaluate(), add an Evaluate() without a using directive to make the base Evaluate() accessible
 CAddEvaluateStatic< FunctionType, HasEvaluateWithGradient, HasEvaluate >The AddEvaluateStatic mixin class will provide a static Evaluate() method if the given FunctionType has EvaluateWithGradient() static, or nothing otherwise
 CAddEvaluateStatic< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Evaluate()
 CAddEvaluateStatic< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Evaluate(), add an Evaluate() without a using directive to make the base Evaluate() accessible
 CAddEvaluateWithGradient< FunctionType, HasEvaluateGradient, HasEvaluateWithGradient >The AddEvaluateWithGradient mixin class will provide an EvaluateWithGradient() method if the given FunctionType has both Evaluate() and Gradient(), or it will provide nothing otherwise
 CAddEvaluateWithGradient< FunctionType, HasEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddEvaluateWithGradient< FunctionType, true, false >If the FunctionType has Evaluate() and Gradient(), provide EvaluateWithGradient()
 CAddEvaluateWithGradientConst< FunctionType, HasEvaluateGradient, HasEvaluateWithGradient >The AddEvaluateWithGradientConst mixin class will provide an EvaluateWithGradient() const method if the given FunctionType has both Evaluate() const and Gradient() const, or it will provide nothing otherwise
 CAddEvaluateWithGradientConst< FunctionType, HasEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddEvaluateWithGradientConst< FunctionType, true, false >If the FunctionType has Evaluate() const and Gradient() const, provide EvaluateWithGradient() const
 CAddEvaluateWithGradientStatic< FunctionType, HasEvaluateGradient, HasEvaluateWithGradient >The AddEvaluateWithGradientStatic mixin class will provide a static EvaluateWithGradient() method if the given FunctionType has both static Evaluate() and static Gradient(), or it will provide nothing otherwise
 CAddEvaluateWithGradientStatic< FunctionType, HasEvaluateGradient, true >Reflect the existing EvaluateWithGradient()
 CAddEvaluateWithGradientStatic< FunctionType, true, false >If the FunctionType has static Evaluate() and static Gradient(), provide static EvaluateWithGradient()
 CAddGradient< FunctionType, HasEvaluateWithGradient, HasGradient >The AddGradient mixin class will provide a Gradient() method if the given FunctionType has EvaluateWithGradient(), or nothing otherwise
 CAddGradient< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddGradient< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Gradient(), add a Gradient() without a using directive to make the base Gradient() accessible
 CAddGradientConst< FunctionType, HasEvaluateWithGradient, HasGradient >The AddGradientConst mixin class will provide a const Gradient() method if the given FunctionType has EvaluateWithGradient() const, or nothing otherwise
 CAddGradientConst< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddGradientConst< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Gradient(), add a Gradient() without a using directive to make the base Gradient() accessible
 CAddGradientStatic< FunctionType, HasEvaluateWithGradient, HasGradient >The AddGradientStatic mixin class will provide a static Gradient() method if the given FunctionType has static EvaluateWithGradient(), or nothing otherwise
 CAddGradientStatic< FunctionType, HasEvaluateWithGradient, true >Reflect the existing Gradient()
 CAddGradientStatic< FunctionType, true, false >If we have EvaluateWithGradient() but no existing Gradient(), add a Gradient() without a using directive to make the base Gradient() accessible
 CAMSGradUpdateAMSGrad is an exponential moving average variant which along with having benefits of optimizers like Adam and RMSProp, also guarantees convergence
 CAtomsClass to hold the information and operations of current atoms in the solution space
 CAugLagrangianImplements the Augmented Lagrangian method of optimization
 CAugLagrangianFunction< LagrangianFunction >This is a utility class used by AugLagrangian, meant to wrap a LagrangianFunction into a function usable by a simple optimizer like L-BFGS
 CAugLagrangianTestFunctionThis function is taken from "Practical Mathematical Optimization" (Snyman), section 5.3.8 ("Application of the Augmented Lagrangian Method")
 CBacktrackingLineSearchDefinition of the backtracking line search algorithm based on the Armijo–Goldstein condition to determine the maximum amount to move along the given search direction
 CBarzilaiBorweinDecayBarzilai-Borwein decay policy for Stochastic variance reduced gradient (SVRG)
 CBigBatchSGD< UpdatePolicyType >Big-batch Stochastic Gradient Descent is a technique for minimizing a function which can be expressed as a sum of other functions
 CCMAES< SelectionPolicyType >CMA-ES - Covariance Matrix Adaptation Evolution Strategy is a stochastic search algorithm
 CCNEConventional Neural Evolution (CNE) is a class of evolutionary algorithms focused on dealing with fixed topology
 CConstantStepImplementation of the ConstantStep stepsize decay policy for parallel SGD
 CConstrLpBallSolverLinearConstrSolver for FrankWolfe algorithm
 CConstrStructGroupSolver< GroupType >Linear Constrained Solver for FrankWolfe
 CCyclicalDecaySimulate a new warm-started run/restart once a number of epochs are performed
 CCyclicDescentCyclic descent policy for Stochastic Coordinate Descent(SCD)
 CExponentialBackoffExponential backoff stepsize reduction policy for parallel SGD
 CExponentialScheduleThe exponential cooling schedule cools the temperature T at every step according to the equation
 CFrankWolfe< LinearConstrSolverType, UpdateRuleType >Frank-Wolfe is a technique to minimize a continuously differentiable convex function $ f $ over a compact convex subset $ D $ of a vector space
 CFullSelection
 CFuncSqSquare loss function $ f(x) = 0.5 * ||Ax - b||_2^2 $
 CGockenbachFunctionThis function is taken from M
 CGradientClipping< UpdatePolicyType >Interface for wrapping around update policies (e.g., VanillaUpdate) and feeding a clipped gradient to them instead of the normal one
 CGradientDescentGradient Descent is a technique to minimize a function
 CGreedyDescentGreedy descent policy for Stochastic Co-ordinate Descent(SCD)
 CGridSearchAn optimizer that finds the minimum of a given function by iterating through points on a multidimensional grid
 CGroupLpBallImplementation of Structured Group
 CIQNIQN is a technique for minimizing a function which can be expressed as a sum of other functions
 CKatyushaType< Proximal >Katyusha is a direct, primal-only stochastic gradient method which uses a "negative momentum" on top of Nesterov’s momentum
 CL_BFGSThe generic L-BFGS optimizer, which uses a back-tracking line search algorithm to minimize a function
 CLineSearchFind the minimum of a function along the line between two points
 CLovaszThetaSDPThis function is the Lovasz-Theta semidefinite program, as implemented in the following paper:
 CLRSDP< SDPType >LRSDP is the implementation of Monteiro and Burer's formulation of low-rank semidefinite programs (LR-SDP)
 CLRSDPFunction< SDPType >The objective function that LRSDP is trying to optimize
 CNadaMaxUpdateNadaMax is an optimizer that combines the AdaMax and NAG
 CNadamUpdateNadam is an optimizer that combines the Adam and NAG optimization strategies
 CNesterovMomentumUpdateNesterov Momentum update policy for Stochastic Gradient Descent (SGD)
 CNoDecayDefinition of the NoDecay class
 COptimisticAdamUpdateOptimisticAdam is an optimizer which implements the Optimistic Adam algorithm which uses Optimistic Mirror Descent with the Adam Optimizer
 CParallelSGD< DecayPolicyType >An implementation of parallel stochastic gradient descent using the lock-free HOGWILD! approach
 CPrimalDualSolver< SDPType >Interface to a primal dual interior point solver
 CProximalApproximate a vector with another vector on lp ball
 CRandomDescentRandom descent policy for Stochastic Coordinate Descent(SCD)
 CRandomSelection
 CRMSPropRMSProp is an optimizer that utilizes the magnitude of recent gradients to normalize the gradients
 CRMSPropUpdateRMSProp is an optimizer that utilizes the magnitude of recent gradients to normalize the gradients
 CSA< CoolingScheduleType >Simulated Annealing is a stochastic optimization algorithm which is able to deliver near-optimal results quickly without knowing the gradient of the function being optimized
 CSARAHPlusUpdateSARAH+ provides an automatic and adaptive choice of the inner loop size
 CSARAHType< UpdatePolicyType >StochAstic Recursive gRadient algoritHm (SARAH)
 CSARAHUpdateVanilla update policy for SARAH
 CSCD< DescentPolicyType >Stochastic Coordinate descent is a technique for minimizing a function by doing a line search along a single direction at the current point in the iteration
 CSDP< ObjectiveMatrixType >Specify an SDP in primal form
 CSGD< UpdatePolicyType, DecayPolicyType >Stochastic Gradient Descent is a technique for minimizing a function which can be expressed as a sum of other functions
 CSGDR< UpdatePolicyType >This class is based on Mini-batch Stochastic Gradient Descent class and simulates a new warm-started run/restart once a number of epochs are performed
 CSMORMS3SMORMS3 is an optimizer that estimates a safe and optimal distance based on curvature and normalizing the stepsize in the parameter space
 CSMORMS3UpdateSMORMS3 is an optimizer that estimates a safe and optimal distance based on curvature and normalizing the stepsize in the parameter space
 CSnapshotEnsemblesSimulate a new warm-started run/restart once a number of epochs are performed
 CSnapshotSGDR< UpdatePolicyType >This class is based on Mini-batch Stochastic Gradient Descent class and simulates a new warm-started run/restart once a number of epochs are performed using the Snapshot ensembles technique
 CSPALeRASGD< DecayPolicyType >SPALeRA Stochastic Gradient Descent is a technique for minimizing a function which can be expressed as a sum of other functions
 CSPALeRAStepsizeDefinition of the SPALeRA stepsize technique, which implements a change detection mechanism with an agnostic adaptation scheme
 CSVRGType< UpdatePolicyType, DecayPolicyType >Stochastic Variance Reduced Gradient is a technique for minimizing a function which can be expressed as a sum of other functions
 CSVRGUpdateVanilla update policy for Stochastic variance reduced gradient (SVRG)
 CBoothFunctionThe Booth function, defined by
 CBukinFunctionThe Bukin function, defined by
 CColvilleFunctionThe Colville function, defined by
 CDropWaveFunctionThe Drop-Wave function, defined by
 CEasomFunctionThe Easom function, defined by
 CEggholderFunctionThe Eggholder function, defined by
 CGDTestFunctionVery, very simple test function which is the composite of three other functions
 CGeneralizedRosenbrockFunctionThe Generalized Rosenbrock function in n dimensions, defined by f(x) = sum_i^{n - 1} (f(i)(x)) f_i(x) = 100 * (x_i^2 - x_{i + 1})^2 + (1 - x_i)^2 x_0 = [-1.2, 1, -1.2, 1, ...]
 CMatyasFunctionThe Matyas function, defined by
 CMcCormickFunctionThe McCormick function, defined by
 CRastriginFunctionThe Rastrigin function, defined by
 CRosenbrockFunctionThe Rosenbrock function, defined by:
 CRosenbrockWoodFunctionThe Generalized Rosenbrock function in 4 dimensions with the Wood Function in four dimensions
 CSchwefelFunctionThe Schwefel function, defined by
 CSGDTestFunctionVery, very simple test function which is the composite of three other functions
 CSparseTestFunction
 CSphereFunctionThe Sphere function, defined by
 CStyblinskiTangFunctionThe Styblinski-Tang function, defined by
 CWoodFunctionThe Wood function, defined by f(x) = f1(x) + f2(x) + f3(x) + f4(x) + f5(x) + f6(x) f1(x) = 100 (x2 - x1^2)^2 f2(x) = (1 - x1)^2 f3(x) = 90 (x4 - x3^2)^2 f4(x) = (1 - x3)^2 f5(x) = 10 (x2 + x4 - 2)^2 f6(x) = (1 / 10) (x2 - x4)^2 x_0 = [-3, -1, -3, -1]
 CTestFuncFWSimple test function for classic Frank Wolfe Algorithm:
 CCheckDecomposableEvaluate< FunctionType >Check if a suitable decomposable overload of Evaluate() is available
 CCheckDecomposableEvaluateWithGradient< FunctionType >Check if a suitable decomposable overload of EvaluateWithGradient() is available
 CCheckDecomposableGradient< FunctionType >Check if a suitable decomposable overload of Gradient() is available
 CCheckEvaluate< FunctionType >Check if a suitable overload of Evaluate() is available
 CCheckEvaluateConstraint< FunctionType >Check if a suitable overload of EvaluateConstraint() is available
 CCheckEvaluateWithGradient< FunctionType >Check if a suitable overload of EvaluateWithGradient() is available
 CCheckGradient< FunctionType >Check if a suitable overload of Gradient() is available
 CCheckGradientConstraint< FunctionType >Check if a suitable overload of GradientConstraint() is available
 CCheckNumConstraints< FunctionType >Check if a suitable overload of NumConstraints() is available
 CCheckNumFeatures< FunctionType >Check if a suitable overload of NumFeatures() is available
 CCheckNumFunctions< FunctionType >Check if a suitable overload of NumFunctions() is available
 CCheckPartialGradient< FunctionType >Check if a suitable overload of PartialGradient() is available
 CCheckShuffle< FunctionType >Check if a suitable overload of Shuffle() is available
 CCheckSparseGradient< FunctionType >Check if a suitable overload of Gradient() that supports sparse gradients is available
 CHasConstSignatures< ClassType, CheckerA, ConstSignatureA, StaticSignatureA, CheckerB, ConstSignatureB, StaticSignatureB >Utility struct: sometimes we want to know if we have two functions available, and that at least one of them is const and both of them are not non-const and non-static
 CHasNonConstSignatures< ClassType, CheckerA, SignatureA, ConstSignatureA, StaticSignatureA, CheckerB, SignatureB, ConstSignatureB, StaticSignatureB >Utility struct: sometimes we want to know if we have two functions available, and that at least one of them is non-const and non-static
 CUnconstructableTypeThis is a utility type used to provide unusable overloads from each of the mixin classes
 CUpdateClassicUse classic rule in the update step for FrankWolfe algorithm
 CUpdateFullCorrectionFull correction approach to update the solution
 CUpdateLineSearchUse line search in the update step for FrankWolfe algorithm
 CUpdateSpanRecalculate the optimal solution in the span of all previous solution space, used as update step for FrankWolfe algorithm
 CVanillaUpdateVanilla update policy for Stochastic Gradient Descent (SGD)
 CExactSVDPolicyImplementation of the exact SVD policy
 CPCA< DecompositionPolicy >This class implements principal components analysis (PCA)
 CQUICSVDPolicyImplementation of the QUIC-SVD policy
 CRandomizedBlockKrylovSVDPolicyImplementation of the randomized block krylov SVD policy
 CRandomizedSVDPolicyImplementation of the randomized SVD policy
 CPerceptron< LearnPolicy, WeightInitializationPolicy, MatType >This class implements a simple perceptron (i.e., a single layer neural network)
 CRandomInitializationThis class is used to initialize weights for the weightVectors matrix in a random manner
 CSimpleWeightUpdate
 CZeroInitializationThis class is used to initialize the matrix weightVectors to zero
 CRadicalAn implementation of RADICAL, an algorithm for independent component analysis (ICA)
 CRangeSearch< MetricType, MatType, TreeType >The RangeSearch class is a template class for performing range searches
 CRangeSearchRules< MetricType, TreeType >The RangeSearchRules class is a template helper class used by RangeSearch class when performing range searches
 CRangeSearchStatStatistic class for RangeSearch, to be set to the StatisticType of the tree type that range search is being performed with
 CRSModel
 CLARSAn implementation of LARS, a stage-wise homotopy-based algorithm for l1-regularized linear regression (LASSO) and l1+l2 regularized linear regression (Elastic Net)
 CLinearRegressionA simple linear regression algorithm using ordinary least squares
 CLogisticRegression< MatType >The LogisticRegression class implements an L2-regularized logistic regression model, and supports training with multiple optimizers and classification
 CLogisticRegressionFunction< MatType >The log-likelihood function for the logistic regression objective function
 CSoftmaxRegressionSoftmax Regression is a classifier which can be used for classification when the data available can take two or more class values
 CSoftmaxRegressionFunction
 CAcrobatImplementation of Acrobat game
 CAcrobat::State
 CAggregatedPolicy< PolicyType >
 CAsyncLearning< WorkerType, EnvironmentType, NetworkType, UpdaterType, PolicyType >Wrapper of various asynchronous learning algorithms, e.g
 CCartPoleImplementation of Cart Pole task
 CCartPole::StateImplementation of the state of Cart Pole
 CContinuousMountainCarImplementation of Continuous Mountain Car task
 CContinuousMountainCar::ActionImplementation of action of Continuous Mountain Car
 CContinuousMountainCar::StateImplementation of state of Continuous Mountain Car
 CGreedyPolicy< EnvironmentType >Implementation for epsilon greedy policy
 CMountainCarImplementation of Mountain Car task
 CMountainCar::StateImplementation of state of Mountain Car
 CNStepQLearningWorker< EnvironmentType, NetworkType, UpdaterType, PolicyType >Forward declaration of NStepQLearningWorker
 COneStepQLearningWorker< EnvironmentType, NetworkType, UpdaterType, PolicyType >Forward declaration of OneStepQLearningWorker
 COneStepSarsaWorker< EnvironmentType, NetworkType, UpdaterType, PolicyType >Forward declaration of OneStepSarsaWorker
 CPendulumImplementation of Pendulum task
 CPendulum::ActionImplementation of action of Pendulum
 CPendulum::StateImplementation of state of Pendulum
 CQLearning< EnvironmentType, NetworkType, UpdaterType, PolicyType, ReplayType >Implementation of various Q-Learning algorithms, such as DQN, double DQN
 CRandomReplay< EnvironmentType >Implementation of random experience replay
 CTrainingConfig
 CMethodFormDetector< Class, MethodForm, AdditionalArgsCount >
 CMethodFormDetector< Class, MethodForm, 0 >
 CMethodFormDetector< Class, MethodForm, 1 >
 CMethodFormDetector< Class, MethodForm, 2 >
 CMethodFormDetector< Class, MethodForm, 3 >
 CMethodFormDetector< Class, MethodForm, 4 >
 CMethodFormDetector< Class, MethodForm, 5 >
 CMethodFormDetector< Class, MethodForm, 6 >
 CMethodFormDetector< Class, MethodForm, 7 >
 CDataDependentRandomInitializerA data-dependent random dictionary initializer for SparseCoding
 CNothingInitializerA DictionaryInitializer for SparseCoding which does not initialize anything; it is useful for when the dictionary is already known and will be set with SparseCoding::Dictionary()
 CRandomInitializerA DictionaryInitializer for use with the SparseCoding class
 CSparseCodingAn implementation of Sparse Coding with Dictionary Learning that achieves sparsity via an l1-norm regularizer on the codes (LASSO) or an (l1+l2)-norm regularizer on the codes (the Elastic Net)
 CQUIC_SVDQUIC-SVD is a matrix factorization technique, which operates in a subspace such that A's approximation in that subspace has minimum error(A being the data matrix)
 CRandomizedBlockKrylovSVDRandomized block krylov SVD is a matrix factorization that is based on randomized matrix approximation techniques, developed in "Randomized Block Krylov Methods for Stronger and Faster Approximate Singular Value Decomposition"
 CRandomizedSVDRandomized SVD is a matrix factorization that is based on randomized matrix approximation techniques, developed in "Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions"
 CRegularizedSVD< OptimizerType >Regularized SVD is a matrix factorization technique that seeks to reduce the error on the training set, that is on the examples for which the ratings have been provided by the users
 CRegularizedSVDFunction< MatType >The data is stored in a matrix of type MatType, so that this class can be used with both dense and sparse matrix types
 CTimerThe timer class provides a way for mlpack methods to be timed
 CTimers
 CAllCategoricalSplit< FitnessFunction >The AllCategoricalSplit is a splitting function that will split categorical features into many children: one child for each category
 CAllCategoricalSplit< FitnessFunction >::AuxiliarySplitInfo< ElemType >
 CAllDimensionSelectThis dimension selection policy allows any dimension to be selected for splitting
 CAxisParallelProjVectorAxisParallelProjVector defines an axis-parallel projection vector
 CBestBinaryNumericSplit< FitnessFunction >The BestBinaryNumericSplit is a splitting function for decision trees that will exhaustively search a numeric dimension for the best binary split
 CBestBinaryNumericSplit< FitnessFunction >::AuxiliarySplitInfo< ElemType >
 CBinaryNumericSplit< FitnessFunction, ObservationType >The BinaryNumericSplit class implements the numeric feature splitting strategy devised by Gama, Rocha, and Medas in the following paper:
 CBinaryNumericSplitInfo< ObservationType >
 CBinarySpaceTree< MetricType, StatisticType, MatType, BoundType, SplitType >A binary space partitioning tree, such as a KD-tree or a ball tree
 CBinarySpaceTree< MetricType, StatisticType, MatType, BoundType, SplitType >::BreadthFirstDualTreeTraverser< RuleType >
 CBinarySpaceTree< MetricType, StatisticType, MatType, BoundType, SplitType >::DualTreeTraverser< RuleType >A dual-tree traverser for binary space trees; see dual_tree_traverser.hpp
 CBinarySpaceTree< MetricType, StatisticType, MatType, BoundType, SplitType >::SingleTreeTraverser< RuleType >A single-tree traverser for binary space trees; see single_tree_traverser.hpp for implementation
 CCategoricalSplitInfo
 CCompareCosineNode
 CCosineTree
 CCoverTree< MetricType, StatisticType, MatType, RootPointPolicy >A cover tree is a tree specifically designed to speed up nearest-neighbor computation in high-dimensional spaces
 CCoverTree< MetricType, StatisticType, MatType, RootPointPolicy >::DualTreeTraverser< RuleType >A dual-tree cover tree traverser; see dual_tree_traverser.hpp
 CCoverTree< MetricType, StatisticType, MatType, RootPointPolicy >::SingleTreeTraverser< RuleType >A single-tree cover tree traverser; see single_tree_traverser.hpp for implementation
 CDiscreteHilbertValue< TreeElemType >The DiscreteHilbertValue class stores Hilbert values for all of the points in a RectangleTree node, and calculates Hilbert values for new points
 CEmptyStatisticEmpty statistic if you are not interested in storing statistics in your tree
 CExampleTree< MetricType, StatisticType, MatType >This is not an actual space tree but instead an example tree that exists to show and document all the functions that mlpack trees must implement
 CFirstPointIsRootThis class is meant to be used as a choice for the policy class RootPointPolicy of the CoverTree class
 CGiniGainThe Gini gain, a measure of set purity usable as a fitness function (FitnessFunction) for decision trees
 CGiniImpurity
 CGreedySingleTreeTraverser< TreeType, RuleType >
 CHilbertRTreeAuxiliaryInformation< TreeType, HilbertValueType >
 CHilbertRTreeDescentHeuristicThis class chooses the best child of a node in a Hilbert R tree when inserting a new point
 CHilbertRTreeSplit< splitOrder >The splitting procedure for the Hilbert R tree
 CHoeffdingCategoricalSplit< FitnessFunction >This is the standard Hoeffding-bound categorical feature proposed in the paper below:
 CHoeffdingNumericSplit< FitnessFunction, ObservationType >The HoeffdingNumericSplit class implements the numeric feature splitting strategy alluded to by Domingos and Hulten in the following paper:
 CHoeffdingTree< FitnessFunction, NumericSplitType, CategoricalSplitType >The HoeffdingTree object represents all of the necessary information for a Hoeffding-bound-based decision tree
 CHoeffdingTreeModelThis class is a serializable Hoeffding tree model that can hold four different types of Hoeffding trees
 CHyperplaneBase< BoundT, ProjVectorT >HyperplaneBase defines a splitting hyperplane based on a projection vector and projection value
 CInformationGainThe standard information gain criterion, used for calculating gain in decision trees
 CIsSpillTree< TreeType >
 CIsSpillTree< tree::SpillTree< MetricType, StatisticType, MatType, HyperplaneType, SplitType > >
 CMeanSpaceSplit< MetricType, MatType >
 CMeanSplit< BoundType, MatType >A binary space partitioning tree node is split into its left and right child
 CMeanSplit< BoundType, MatType >::SplitInfoInformation about the partition
 CMidpointSpaceSplit< MetricType, MatType >
 CMidpointSplit< BoundType, MatType >A binary space partitioning tree node is split into its left and right child
 CMidpointSplit< BoundType, MatType >::SplitInfoA struct that contains information about the split
 CMinimalCoverageSweep< SplitPolicy >The MinimalCoverageSweep class finds a partition along which we can split a node according to the coverage of two resulting nodes
 CMinimalCoverageSweep< SplitPolicy >::SweepCost< TreeType >A struct that provides the type of the sweep cost
 CMinimalSplitsNumberSweep< SplitPolicy >The MinimalSplitsNumberSweep class finds a partition along which we can split a node according to the number of required splits of the node
 CMinimalSplitsNumberSweep< SplitPolicy >::SweepCost< typename >A struct that provides the type of the sweep cost
 CMultipleRandomDimensionSelect< NumDimensions >This dimension selection policy allows the selection from a few random dimensions
 CNoAuxiliaryInformation< TreeType >
 CNumericSplitInfo< ObservationType >
 COctree< MetricType, StatisticType, MatType >
 COctree< MetricType, StatisticType, MatType >::DualTreeTraverser< MetricType, StatisticType, MatType >A dual-tree traverser; see dual_tree_traverser.hpp
 COctree< MetricType, StatisticType, MatType >::SingleTreeTraverser< RuleType >A single-tree traverser; see single_tree_traverser.hpp
 COctree< MetricType, StatisticType, MatType >::SplitType::SplitInfo
 CProjVectorProjVector defines a general projection vector (not necessarily axis-parallel)
 CQueueFrame< TreeType, TraversalInfoType >
 CRandomDimensionSelectThis dimension selection policy only selects one single random dimension
 CRandomForest< FitnessFunction, DimensionSelectionType, NumericSplitType, CategoricalSplitType, ElemType >
 CRectangleTree< MetricType, StatisticType, MatType, SplitType, DescentType, AuxiliaryInformationType >A rectangle type tree, such as an R-tree or X-tree
 CRectangleTree< MetricType, StatisticType, MatType, SplitType, DescentType, AuxiliaryInformationType >::DualTreeTraverser< MetricType, StatisticType, MatType, SplitType, DescentType, AuxiliaryInformationType >A dual tree traverser for rectangle type trees
 CRectangleTree< MetricType, StatisticType, MatType, SplitType, DescentType, AuxiliaryInformationType >::SingleTreeTraverser< RuleType >A single-tree traverser for rectangle type trees
 CRPlusPlusTreeAuxiliaryInformation< TreeType >
 CRPlusPlusTreeDescentHeuristic
 CRPlusPlusTreeSplitPolicyThe RPlusPlusTreeSplitPolicy helps to determine the subtree into which we should insert a child of an intermediate node that is being split
 CRPlusTreeDescentHeuristic
 CRPlusTreeSplit< SplitPolicyType, SweepType >The RPlusTreeSplit class performs the split process of a node on overflow
 CRPlusTreeSplitPolicyThe RPlusTreeSplitPolicy helps to determine the subtree into which we should insert a child of an intermediate node that is being split
 CRPTreeMaxSplit< BoundType, MatType >This class splits a node by a random hyperplane
 CRPTreeMaxSplit< BoundType, MatType >::SplitInfoInformation about the partition
 CRPTreeMeanSplit< BoundType, MatType >This class splits a binary space tree
 CRPTreeMeanSplit< BoundType, MatType >::SplitInfoInformation about the partition
 CRStarTreeDescentHeuristicWhen descending a RectangleTree to insert a point, we need to have a way to choose a child node when the point isn't enclosed by any of them
 CRStarTreeSplitA Rectangle Tree has new points inserted at the bottom
 CRTreeDescentHeuristicWhen descending a RectangleTree to insert a point, we need to have a way to choose a child node when the point isn't enclosed by any of them
 CRTreeSplitA Rectangle Tree has new points inserted at the bottom
 CSpaceSplit< MetricType, MatType >
 CSpillTree< MetricType, StatisticType, MatType, HyperplaneType, SplitType >A hybrid spill tree is a variant of binary space trees in which the children of a node can "spill over" each other, and contain shared datapoints
 CSpillTree< MetricType, StatisticType, MatType, HyperplaneType, SplitType >::SpillDualTreeTraverser< MetricType, StatisticType, MatType, HyperplaneType, SplitType >A generic dual-tree traverser for hybrid spill trees; see spill_dual_tree_traverser.hpp for implementation
 CSpillTree< MetricType, StatisticType, MatType, HyperplaneType, SplitType >::SpillSingleTreeTraverser< MetricType, StatisticType, MatType, HyperplaneType, SplitType >A generic single-tree traverser for hybrid spill trees; see spill_single_tree_traverser.hpp for implementation
 CTraversalInfo< TreeType >The TraversalInfo class holds traversal information which is used in dual-tree (and single-tree) traversals
 CTreeTraits< TreeType >The TreeTraits class provides compile-time information on the characteristics of a given tree type
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, bound::BallBound, SplitType > >This is a specialization of the TreeTraits class to the BallTree tree type
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, bound::CellBound, SplitType > >This is a specialization of the TreeTraits class to the UBTree tree type
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, bound::HollowBallBound, SplitType > >This is a specialization of the TreeTraits class to an arbitrary tree with HollowBallBound (currently only the vantage point tree is supported)
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, BoundType, RPTreeMaxSplit > >This is a specialization of the TreeTraits class to the max-split random projection tree
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, BoundType, RPTreeMeanSplit > >This is a specialization of the TreeTraits class to the mean-split random projection tree
 CTreeTraits< BinarySpaceTree< MetricType, StatisticType, MatType, BoundType, SplitType > >This is a specialization of the TreeTraits class to the BinarySpaceTree tree type
 CTreeTraits< CoverTree< MetricType, StatisticType, MatType, RootPointPolicy > >The specialization of the TreeTraits class for the CoverTree tree type
 CTreeTraits< Octree< MetricType, StatisticType, MatType > >This is a specialization of the TreeTraits class to the Octree tree type
 CTreeTraits< RectangleTree< MetricType, StatisticType, MatType, RPlusTreeSplit< SplitPolicyType, SweepType >, DescentType, AuxiliaryInformationType > >Since the R+/R++ tree can not have overlapping children, we should define traits for the R+/R++ tree
 CTreeTraits< RectangleTree< MetricType, StatisticType, MatType, SplitType, DescentType, AuxiliaryInformationType > >This is a specialization of the TreeTraits class to the RectangleTree tree type
 CTreeTraits< SpillTree< MetricType, StatisticType, MatType, HyperplaneType, SplitType > >This is a specialization of the TreeTraits class to the SpillTree tree type
 CUBTreeSplit< BoundType, MatType >Split a node into two parts according to the median address of points contained in the node
 CVantagePointSplit< BoundType, MatType, MaxNumSamples >The class splits a binary space partitioning tree node according to the median distance to the vantage point
 CVantagePointSplit< BoundType, MatType, MaxNumSamples >::SplitInfoA struct that contains information about the split
 CXTreeAuxiliaryInformation< TreeType >The XTreeAuxiliaryInformation class provides information specific to X trees for each node in a RectangleTree
 CXTreeAuxiliaryInformation< TreeType >::SplitHistoryStructThe X tree requires that the tree records its "split history"
 CXTreeSplitA Rectangle Tree has new points inserted at the bottom
 CIsStdVector< T >Metaprogramming structure for vector detection
 CIsStdVector< std::vector< T, A > >Metaprogramming structure for vector detection
 CNullOutStreamUsed for Log::Debug when not compiled with debugging symbols
 CParamDataThis structure holds all of the information about a single parameter, including its value (which is set when ParseCommandLine() is called)
 CPrefixedOutStreamAllows us to output to an ostream with a prefix at the beginning of each line, in the same way we would output to cout or cerr
 CProgramDocA static object whose constructor registers program documentation with the CLI class
 CNeighborSearchStat< neighbor::NearestNeighborSort >
 Ctemplate AuxiliarySplitInfo< ElemType >
 CRangeType< double >
 CRangeType< ElemType >
 CSDP< arma::sp_mat >
 CSGD< mlpack::optimization::AdaDeltaUpdate >
 CSGD< mlpack::optimization::AdaGradUpdate >
 CSGD< mlpack::optimization::RMSPropUpdate >
 CSGD< mlpack::optimization::SMORMS3Update >
 CSGD< UpdatePolicyType, CyclicalDecay >
 CSGD< UpdatePolicyType, SnapshotEnsembles >
 CSGD< UpdateRule >
 CSparseSVMFunction
 Ctrue_type
 CTrainFormBase< PT, void, const MT &, const data::DatasetInfo &, const PT &, const size_t >
 CTrainFormBase< PT, void, const MT &, const data::DatasetInfo &, const PT &>
 CTrainFormBase< PT, void, const MT &, const PT &, const size_t >
 CTrainFormBase< PT, void, const MT &, const PT &>
 CTrainFormBase< PT, WT, const MT &, const data::DatasetInfo &, const PT &, const size_t, const WT &>
 CTrainFormBase< PT, WT, const MT &, const data::DatasetInfo &, const PT &, const WT &>
 CTrainFormBase< PT, WT, const MT &, const PT &, const size_t, const WT &>
 CTrainFormBase< PT, WT, const MT &, const PT &, const WT &>
 CTrainHMMModel