Commit 19ec0310 authored by Manuel Grizonnet's avatar Manuel Grizonnet

Merge branch 'app_docsprint' of https://git.orfeo-toolbox.org/git/otb into app_docsprint

parents dfa256b4 db93f2a2
......@@ -39,11 +39,28 @@ namespace Wrapper
//BoostType
AddParameter(ParameterType_Choice, "classifier.boost.t", "Boost Type");
AddChoice("classifier.boost.t.discrete", "Discrete AdaBoost");
AddChoice("classifier.boost.t.real", "Real AdaBoost (technique using confidence-rated predictions "
"and working well with categorical data)");
AddChoice("classifier.boost.t.logit", "LogitBoost (technique producing good regression fits)");
AddChoice("classifier.boost.t.gentle", "Gentle AdaBoost (technique setting less weight on outlier data points "
"and, for that reason, being often good with regression data)");
SetParameterDescription("classifier.boost.t.discret",
"This procedure trains the classifiers on weighted versions of the training "
"sample, giving higher weight to cases that are currently misclassified. "
"This is done for a sequence of weighter samples, and then the final "
"classifier is defined as a linear combination of the classifier from "
"each stage.");
AddChoice("classifier.boost.t.real",
"Real AdaBoost (technique using confidence-rated predictions "
"and working well with categorical data)");
SetParameterDescription("classifier.boost.t.real",
"Adaptation of the Discrete Adaboost algorithm with Real value");
AddChoice("classifier.boost.t.logit",
"LogitBoost (technique producing good regression fits)");
SetParameterDescription("classifier.boost.t.logit",
"This procedure is an adaptive Newton algorithm for fitting an additive "
"logistic regression model. Beware it can produce numeric instability.");
AddChoice("classifier.boost.t.gentle",
"Gentle AdaBoost (technique setting less weight on outlier data points "
"and, for that reason, being often good with regression data)");
SetParameterDescription("classifier.boost.t.gentle",
"A modified version of the Real Adaboost algorithm, using Newton stepping "
"rather than exact optimization at each step.");
SetParameterString("classifier.boost.t", "real", false);
SetParameterDescription("classifier.boost.t", "Type of Boosting algorithm.");
//Do not expose SplitCriteria
......@@ -54,9 +71,11 @@ namespace Wrapper
//WeightTrimRate
AddParameter(ParameterType_Float, "classifier.boost.r", "Weight Trim Rate");
SetParameterFloat("classifier.boost.r",0.95, false);
SetParameterDescription("classifier.boost.r","A threshold between 0 and 1 used to save computational time. "
"Samples with summary weight <= (1 - weight_trim_rate) do not participate in the next iteration of training. "
"Set this parameter to 0 to turn off this functionality.");
SetParameterDescription("classifier.boost.r",
"A threshold between 0 and 1 used to save computational time. "
"Samples with summary weight <= (1 - weight_trim_rate) do not participate in"
" the next iteration of training. Set this parameter to 0 to turn off this "
"functionality.");
//MaxDepth : Not sure that this parameter has to be exposed.
AddParameter(ParameterType_Int, "classifier.boost.m", "Maximum depth of the tree");
SetParameterInt("classifier.boost.m",1, false);
......
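For orientation, here is a minimal sketch (not part of this commit) of how the Boost keys above might map onto the OpenCV 2.x training structure. CvBoostParams and its constructor order are the standard OpenCV ml API; the pairing of application keys to arguments is an assumption drawn from the defaults set above, and the weak classifier count is a placeholder.

#include <opencv2/ml/ml.hpp>

// Sketch only: assumed pairing of application keys with CvBoostParams.
CvBoostParams MakeBoostParams()
{
  return CvBoostParams(CvBoost::REAL, // classifier.boost.t = "real" (default)
                       100,           // weak classifier count (placeholder)
                       0.95,          // classifier.boost.r, weight trim rate
                       1,             // classifier.boost.m, maximum tree depth
                       false,         // surrogate splits, not exposed
                       0);            // class priors, not exposed
}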
......@@ -35,8 +35,8 @@ LearningApplicationBase<TInputValue,TOutputValue>
{
AddChoice("classifier.dt", "Decision Tree classifier");
SetParameterDescription("classifier.dt",
"This group of parameters allows setting Decision Tree classifier parameters. "
"See complete documentation here \\url{http://docs.opencv.org/modules/ml/doc/decision_trees.html}.");
"This group of parameters allows setting Decision Tree classifier parameters. "
"See complete documentation here \\url{http://docs.opencv.org/modules/ml/doc/decision_trees.html}.");
//MaxDepth
AddParameter(ParameterType_Int, "classifier.dt.max", "Maximum depth of the tree");
#ifdef OTB_OPENCV_3
......@@ -44,23 +44,25 @@ LearningApplicationBase<TInputValue,TOutputValue>
#else
SetParameterInt("classifier.dt.max",65535, false);
#endif
SetParameterDescription(
"classifier.dt.max", "The training algorithm attempts to split each node while its depth is smaller than the maximum "
"possible depth of the tree. The actual depth may be smaller if the other termination criteria are met, and/or "
"if the tree is pruned.");
SetParameterDescription("classifier.dt.max",
"The training algorithm attempts to split each node while its depth is smaller "
"than the maximum possible depth of the tree. The actual depth may be smaller "
"if the other termination criteria are met, and/or if the tree is pruned.");
//MinSampleCount
AddParameter(ParameterType_Int, "classifier.dt.min", "Minimum number of samples in each node");
SetParameterInt("classifier.dt.min",10, false);
SetParameterDescription("classifier.dt.min", "If the number of samples in a node is smaller than this parameter, "
"then this node will not be split.");
SetParameterDescription("classifier.dt.min",
"If the number of samples in a node is smaller "
"than this parameter, then this node will not be split.");
//RegressionAccuracy
AddParameter(ParameterType_Float, "classifier.dt.ra", "Termination criteria for regression tree");
SetParameterFloat("classifier.dt.ra",0.01, false);
SetParameterDescription("classifier.dt.min", "If all absolute differences between an estimated value in a node "
"and the values of the train samples in this node are smaller than this regression accuracy parameter, "
"then the node will not be split.");
SetParameterDescription("classifier.dt.ra",
"If all absolute differences between an estimated value in a node "
"and the values of the train samples in this node are smaller than this "
"regression accuracy parameter, then the node will not be split further.");
//UseSurrogates : don't need to be exposed !
//AddParameter(ParameterType_Empty, "classifier.dt.sur", "Surrogate splits will be built");
......@@ -68,11 +70,12 @@ LearningApplicationBase<TInputValue,TOutputValue>
//MaxCategories
AddParameter(ParameterType_Int, "classifier.dt.cat",
"Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split");
"Cluster possible values of a categorical variable into K <= cat clusters to find a "
"suboptimal split");
SetParameterInt("classifier.dt.cat",10, false);
SetParameterDescription(
"classifier.dt.cat",
"Cluster possible values of a categorical variable into K <= cat clusters to find a suboptimal split.");
SetParameterDescription("classifier.dt.cat",
"Cluster possible values of a categorical variable into K <= cat clusters to find a "
"suboptimal split.");
//CVFolds
AddParameter(ParameterType_Int, "classifier.dt.f", "K-fold cross-validations");
......@@ -82,18 +85,20 @@ LearningApplicationBase<TInputValue,TOutputValue>
#else
SetParameterInt("classifier.dt.f",10, false);
#endif
SetParameterDescription(
"classifier.dt.f", "If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K is equal to cv_folds.");
SetParameterDescription("classifier.dt.f",
"If cv_folds > 1, then it prunes a tree with K-fold cross-validation where K "
"is equal to cv_folds.");
//Use1seRule
AddParameter(ParameterType_Empty, "classifier.dt.r", "Set Use1seRule flag to false");
SetParameterDescription(
"classifier.dt.r",
"If true, then a pruning will be harsher. This will make a tree more compact and more resistant to the training data noise but a bit less accurate.");
SetParameterDescription("classifier.dt.r",
"If true, then a pruning will be harsher. This will make a tree more compact and more "
"resistant to the training data noise but a bit less accurate.");
//TruncatePrunedTree
AddParameter(ParameterType_Empty, "classifier.dt.t", "Set TruncatePrunedTree flag to false");
SetParameterDescription("classifier.dt.t", "If true, then pruned branches are physically removed from the tree.");
SetParameterDescription("classifier.dt.t",
"If true, then pruned branches are physically removed from the tree.");
//Priors are not exposed.
......
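In the same spirit, a hedged sketch of where the decision tree keys would land in OpenCV 2.x. CvDTreeParams and its constructor are the real OpenCV API; the key-to-argument mapping is inferred from the defaults registered above.

#include <opencv2/ml/ml.hpp>

// Sketch only: assumed pairing of application keys with CvDTreeParams.
CvDTreeParams MakeDTreeParams()
{
  return CvDTreeParams(65535, // classifier.dt.max, maximum tree depth
                       10,    // classifier.dt.min, minimum samples per node
                       0.01f, // classifier.dt.ra, regression accuracy
                       false, // surrogate splits, not exposed
                       10,    // classifier.dt.cat, maximum categories
                       10,    // classifier.dt.f, cross-validation folds
                       true,  // Use1seRule, cleared by classifier.dt.r
                       true,  // TruncatePrunedTree, cleared by classifier.dt.t
                       0);    // class priors, not exposed
}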
......@@ -37,9 +37,23 @@ namespace Wrapper
SetParameterDescription("classifier.libsvm", "This group of parameters allows setting SVM classifier parameters.");
AddParameter(ParameterType_Choice, "classifier.libsvm.k", "SVM Kernel Type");
AddChoice("classifier.libsvm.k.linear", "Linear");
SetParameterDescription("classifier.libsvm.k.linear",
"Linear Kernel, no mapping is done, this is the fastest option.");
AddChoice("classifier.libsvm.k.rbf", "Gaussian radial basis function");
SetParameterDescription("classifier.libsvm.k.rbf",
"This kernel is a good choice in most of the case. It is "
"an exponential function of the euclidian distance between "
"the vectors.");
AddChoice("classifier.libsvm.k.poly", "Polynomial");
SetParameterDescription("classifier.libsvm.k.poly",
"Polynomial Kernel, the mapping is a polynomial function.")
AddChoice("classifier.libsvm.k.sigmoid", "Sigmoid");
SetParameterDescription("classifier.libsvm.k.sigmoid",
"The kernel is a hyperbolic tangente function of the vectors.");
SetParameterString("classifier.libsvm.k", "linear", false);
SetParameterDescription("classifier.libsvm.k", "SVM Kernel Type.");
AddParameter(ParameterType_Choice, "classifier.libsvm.m", "SVM Model Type");
......@@ -47,21 +61,41 @@ namespace Wrapper
if (this->m_RegressionFlag)
{
AddChoice("classifier.libsvm.m.epssvr", "Epsilon Support Vector Regression");
SetParameterDescription("classifier.libsvm.m.epssvr",
"The distance between feature vectors from the training set and the "
"fitting hyper-plane must be less than Epsilon. For outliers the penalty "
"multiplier C is used ");
AddChoice("classifier.libsvm.m.nusvr", "Nu Support Vector Regression");
SetParameterString("classifier.libsvm.m", "epssvr", false);
SetParameterDescription("classifier.libsvm.m.nusvr",
"Same as the epsilon regression except that this time the bounded "
"parameter nu is used instead of epsilon");
}
else
{
AddChoice("classifier.libsvm.m.csvc", "C support vector classification");
SetParameterDescription("classifier.libsvm.m.csvc",
"This formulation allows imperfect separation of classes. The penalty "
"is set through the cost parameter C.");
AddChoice("classifier.libsvm.m.nusvc", "Nu support vector classification");
SetParameterDescription("classifier.libsvm.m.nusvc",
"This formulation allows imperfect separation of classes. The penalty "
"is set through the cost parameter Nu. As compared to C, Nu is harder "
"to optimize, and may not be as fast.");
AddChoice("classifier.libsvm.m.oneclass", "Distribution estimation (One Class SVM)");
SetParameterString("classifier.libsvm.m", "csvc", false);
}
AddParameter(ParameterType_Float, "classifier.libsvm.c", "Cost parameter C");
SetParameterFloat("classifier.libsvm.c",1.0, false);
SetParameterDescription(
"classifier.libsvm.c",
"SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.");
SetParameterDescription("classifier.libsvm.c",
"SVM models have a cost parameter C (1 by default) to control the "
"trade-off between training errors and forcing rigid margins.");
// It seems that a nu parameter is missing for the nu-SVM use.
AddParameter(ParameterType_Empty, "classifier.libsvm.opt", "Parameters optimization");
MandatoryOff("classifier.libsvm.opt");
SetParameterDescription("classifier.libsvm.opt", "SVM parameters optimization flag.");
......@@ -73,8 +107,13 @@ namespace Wrapper
{
AddParameter(ParameterType_Float, "classifier.libsvm.eps", "Epsilon");
SetParameterFloat("classifier.libsvm.eps",1e-3, false);
SetParameterDescription("classifier.libsvm.eps",
"Parameter for the epsilon regression mode.");
AddParameter(ParameterType_Float, "classifier.libsvm.nu", "Nu");
SetParameterFloat("classifier.libsvm.nu",0.5, false);
SetParameterDescription("classifier.libsvm.nu",
"Cost parameter Nu, in the range 0..1, the larger the value, "
"the smoother the decision.");
}
}
......
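For reference, a rough sketch of the corresponding libsvm svm_parameter fields. The struct and its constants are libsvm's public API; mapping classifier.libsvm.eps onto the epsilon-SVR loss width (param.p) is an assumption based on its description above.

#include "svm.h" // libsvm public header

// Sketch only: assumed pairing of application keys with svm_parameter.
svm_parameter MakeLibSVMParameter()
{
  svm_parameter param = svm_parameter();
  param.svm_type    = C_SVC;  // classifier.libsvm.m = "csvc" (default)
  param.kernel_type = LINEAR; // classifier.libsvm.k = "linear" (default)
  param.C           = 1.0;    // classifier.libsvm.c, cost parameter
  param.nu          = 0.5;    // classifier.libsvm.nu
  param.p           = 1e-3;   // classifier.libsvm.eps (assumed epsilon-SVR loss width)
  param.cache_size  = 100;    // kernel cache in MB, not exposed
  param.eps         = 1e-3;   // solver stopping tolerance, not exposed
  return param;
}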
......@@ -36,28 +36,37 @@ LearningApplicationBase<TInputValue,TOutputValue>
{
AddChoice("classifier.ann", "Artificial Neural Network classifier");
SetParameterDescription("classifier.ann",
"This group of parameters allows setting Artificial Neural Network classifier parameters. "
"See complete documentation here \\url{http://docs.opencv.org/modules/ml/doc/neural_networks.html}.");
"This group of parameters allows setting Artificial Neural Network "
"classifier parameters. See complete documentation here "
"\\url{http://docs.opencv.org/modules/ml/doc/neural_networks.html}.");
//TrainMethod
AddParameter(ParameterType_Choice, "classifier.ann.t", "Train Method Type");
AddChoice("classifier.ann.t.reg", "RPROP algorithm");
SetParameterDescription("classifier.ann.t.reg",
"");
AddChoice("classifier.ann.t.back", "Back-propagation algorithm");
SetParameterDescription("classifier.ann.t.back",
"");
SetParameterString("classifier.ann.t", "reg", false);
SetParameterDescription("classifier.ann.t", "Type of training method for the multilayer perceptron (MLP) neural network.");
SetParameterDescription("classifier.ann.t",
"Type of training method for the multilayer perceptron (MLP) neural network.");
//LayerSizes
//There is no ParameterType_IntList, so I use a ParameterType_StringList and convert it.
/*std::vector<std::string> layerSizes;
layerSizes.push_back("100");
layerSizes.push_back("100"); */
AddParameter(ParameterType_StringList, "classifier.ann.sizes", "Number of neurons in each intermediate layer");
AddParameter(ParameterType_StringList, "classifier.ann.sizes",
"Number of neurons in each intermediate layer");
//SetParameterStringList("classifier.ann.sizes", layerSizes);
SetParameterDescription("classifier.ann.sizes",
"The number of neurons in each intermediate layer (excluding input and output layers).");
"The number of neurons in each intermediate layer (excluding input and output layers).");
//ActivateFunction
AddParameter(ParameterType_Choice, "classifier.ann.f", "Neuron activation function type");
AddParameter(ParameterType_Choice, "classifier.ann.f",
"Neuron activation function type");
AddChoice("classifier.ann.f.ident", "Identity function");
AddChoice("classifier.ann.f.sig", "Symmetrical Sigmoid function");
AddChoice("classifier.ann.f.gau", "Gaussian function (Not completely supported)");
......@@ -65,47 +74,51 @@ LearningApplicationBase<TInputValue,TOutputValue>
SetParameterDescription("classifier.ann.f", "Neuron activation function.");
//Alpha
AddParameter(ParameterType_Float, "classifier.ann.a", "Alpha parameter of the activation function");
AddParameter(ParameterType_Float, "classifier.ann.a",
"Alpha parameter of the activation function");
SetParameterFloat("classifier.ann.a",1., false);
SetParameterDescription("classifier.ann.a",
"Alpha parameter of the activation function (used only with sigmoid and gaussian functions).");
"Alpha parameter of the activation function (used only with sigmoid and gaussian functions).");
//Beta
AddParameter(ParameterType_Float, "classifier.ann.b", "Beta parameter of the activation function");
AddParameter(ParameterType_Float, "classifier.ann.b",
"Beta parameter of the activation function");
SetParameterFloat("classifier.ann.b",1., false);
SetParameterDescription("classifier.ann.b",
"Beta parameter of the activation function (used only with sigmoid and gaussian functions).");
"Beta parameter of the activation function (used only with sigmoid and gaussian functions).");
//BackPropDWScale
AddParameter(ParameterType_Float, "classifier.ann.bpdw",
"Strength of the weight gradient term in the BACKPROP method");
"Strength of the weight gradient term in the BACKPROP method");
SetParameterFloat("classifier.ann.bpdw",0.1, false);
SetParameterDescription(
"classifier.ann.bpdw",
"Strength of the weight gradient term in the BACKPROP method. The recommended value is about 0.1.");
SetParameterDescription("classifier.ann.bpdw",
"Strength of the weight gradient term in the BACKPROP method. The "
"recommended value is about 0.1.");
//BackPropMomentScale
AddParameter(ParameterType_Float, "classifier.ann.bpms",
"Strength of the momentum term (the difference between weights on the 2 previous iterations)");
"Strength of the momentum term (the difference between weights on the 2 previous iterations)");
SetParameterFloat("classifier.ann.bpms",0.1, false);
SetParameterDescription(
"classifier.ann.bpms",
"Strength of the momentum term (the difference between weights on the 2 previous iterations). "
"This parameter provides some inertia to smooth the random fluctuations of the weights. "
"It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.");
SetParameterDescription("classifier.ann.bpms",
"Strength of the momentum term (the difference between weights on the 2 previous "
"iterations). This parameter provides some inertia to smooth the random "
"fluctuations of the weights. It can vary from 0 (the feature is disabled) "
"to 1 and beyond. The value 0.1 or so is good enough.");
//RegPropDW0
AddParameter(ParameterType_Float, "classifier.ann.rdw",
"Initial value Delta_0 of update-values Delta_{ij} in RPROP method");
"Initial value Delta_0 of update-values Delta_{ij} in RPROP method");
SetParameterFloat("classifier.ann.rdw",0.1, false);
SetParameterDescription("classifier.ann.rdw", "Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).");
SetParameterDescription("classifier.ann.rdw",
"Initial value Delta_0 of update-values Delta_{ij} in RPROP method (default = 0.1).");
//RegPropDWMin
AddParameter(ParameterType_Float, "classifier.ann.rdwm", "Update-values lower limit Delta_{min} in RPROP method");
AddParameter(ParameterType_Float, "classifier.ann.rdwm",
"Update-values lower limit Delta_{min} in RPROP method");
SetParameterFloat("classifier.ann.rdwm",1e-7, false);
SetParameterDescription(
"classifier.ann.rdwm",
"Update-values lower limit Delta_{min} in RPROP method. It must be positive (default = 1e-7).");
SetParameterDescription("classifier.ann.rdwm",
"Update-values lower limit Delta_{min} in RPROP method. It must be positive "
"(default = 1e-7).");
//TermCriteriaType
AddParameter(ParameterType_Choice, "classifier.ann.term", "Termination criteria");
......@@ -116,15 +129,18 @@ LearningApplicationBase<TInputValue,TOutputValue>
SetParameterDescription("classifier.ann.term", "Termination criteria.");
//Epsilon
AddParameter(ParameterType_Float, "classifier.ann.eps", "Epsilon value used in the Termination criteria");
AddParameter(ParameterType_Float, "classifier.ann.eps",
"Epsilon value used in the Termination criteria");
SetParameterFloat("classifier.ann.eps",0.01, false);
SetParameterDescription("classifier.ann.eps", "Epsilon value used in the Termination criteria.");
SetParameterDescription("classifier.ann.eps",
"Epsilon value used in the Termination criteria.");
//MaxIter
AddParameter(ParameterType_Int, "classifier.ann.iter",
"Maximum number of iterations used in the Termination criteria");
"Maximum number of iterations used in the Termination criteria");
SetParameterInt("classifier.ann.iter",1000, false);
SetParameterDescription("classifier.ann.iter", "Maximum number of iterations used in the Termination criteria.");
SetParameterDescription("classifier.ann.iter",
"Maximum number of iterations used in the Termination criteria.");
}
......
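A hedged sketch of the matching OpenCV 2.x MLP training structure. CvANN_MLP_TrainParams and cvTermCriteria are the real API; the comments pair each field with the application key it appears to back.

#include <opencv2/ml/ml.hpp>

// Sketch only: assumed pairing of application keys with CvANN_MLP_TrainParams.
CvANN_MLP_TrainParams MakeANNTrainParams()
{
  CvANN_MLP_TrainParams params;
  params.train_method    = CvANN_MLP_TrainParams::RPROP; // classifier.ann.t = "reg" (default)
  params.bp_dw_scale     = 0.1;  // classifier.ann.bpdw
  params.bp_moment_scale = 0.1;  // classifier.ann.bpms
  params.rp_dw0          = 0.1;  // classifier.ann.rdw
  params.rp_dw_min       = 1e-7; // classifier.ann.rdwm
  // classifier.ann.term / .iter / .eps together form the termination criteria.
  params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, 0.01);
  return params;
}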
......@@ -53,6 +53,7 @@ namespace Wrapper
}
AddParameter(ParameterType_Choice, "classifier.svm.k", "SVM Kernel Type");
AddChoice("classifier.svm.k.linear", "Linear");
AddChoice("classifier.svm.k.rbf", "Gaussian radial basis function");
AddChoice("classifier.svm.k.poly", "Polynomial");
AddChoice("classifier.svm.k.sigmoid", "Sigmoid");
......@@ -60,52 +61,71 @@ namespace Wrapper
SetParameterDescription("classifier.svm.k", "SVM Kernel Type.");
AddParameter(ParameterType_Float, "classifier.svm.c", "Cost parameter C");
SetParameterFloat("classifier.svm.c",1.0, false);
SetParameterDescription(
"classifier.svm.c",
"SVM models have a cost parameter C (1 by default) to control the trade-off between training errors and forcing rigid margins.");
SetParameterDescription("classifier.svm.c",
"SVM models have a cost parameter C (1 by default) to control the trade-off"
" between training errors and forcing rigid margins.");
AddParameter(ParameterType_Float, "classifier.svm.nu",
"Parameter nu of a SVM optimization problem (NU_SVC / ONE_CLASS)");
SetParameterFloat("classifier.svm.nu",0.0, false);
SetParameterDescription("classifier.svm.nu", "Parameter nu of a SVM optimization problem.");
SetParameterDescription("classifier.svm.nu",
"Parameter nu of a SVM optimization problem.");
if (this->m_RegressionFlag)
{
AddParameter(ParameterType_Float, "classifier.svm.p", "Parameter epsilon of a SVM optimization problem (EPS_SVR)");
SetParameterFloat("classifier.svm.p",1.0, false);
SetParameterDescription("classifier.svm.p", "Parameter epsilon of a SVM optimization problem (EPS_SVR).");
AddParameter(ParameterType_Choice, "classifier.svm.term", "Termination criteria");
SetParameterDescription("classifier.svm.term","Termination criteria for iterative algorithm");
AddChoice("classifier.svm.term.iter", "Stops when maximum iteration is reached.");
AddChoice("classifier.svm.term.eps", "Stops when accuracy is lower than epsilon.");
AddChoice("classifier.svm.term.all", "Stops when either iteration or epsilon criteria is true");
AddParameter(ParameterType_Choice,
"classifier.svm.term", "Termination criteria");
SetParameterDescription("classifier.svm.term",
"Termination criteria for iterative algorithm");
AddChoice("classifier.svm.term.iter",
"Stops when maximum iteration is reached.");
AddChoice("classifier.svm.term.eps",
"Stops when accuracy is lower than epsilon.");
AddChoice("classifier.svm.term.all",
"Stops when either iteration or epsilon criteria is true");
AddParameter(ParameterType_Float, "classifier.svm.iter", "Maximum iteration");
SetParameterFloat("classifier.svm.iter",1000, false);
SetParameterDescription("classifier.svm.iter", "Maximum number of iterations (corresponds to the termination criteria 'iter').");
SetParameterDescription("classifier.svm.iter",
"Maximum number of iterations (corresponds to the termination criteria 'iter').");
AddParameter(ParameterType_Float, "classifier.svm.eps", "Epsilon accuracy threshold");
AddParameter(ParameterType_Float, "classifier.svm.eps",
"Epsilon accuracy threshold");
SetParameterFloat("classifier.svm.eps",FLT_EPSILON, false);
SetParameterDescription("classifier.svm.eps", "Epsilon accuracy (corresponds to the termination criteria 'eps').");
SetParameterDescription("classifier.svm.eps",
"Epsilon accuracy (corresponds to the termination criteria 'eps').");
}
AddParameter(ParameterType_Float, "classifier.svm.coef0", "Parameter coef0 of a kernel function (POLY / SIGMOID)");
AddParameter(ParameterType_Float, "classifier.svm.coef0",
"Parameter coef0 of a kernel function (POLY / SIGMOID)");
SetParameterFloat("classifier.svm.coef0",0.0, false);
SetParameterDescription("classifier.svm.coef0", "Parameter coef0 of a kernel function (POLY / SIGMOID).");
SetParameterDescription("classifier.svm.coef0",
"Parameter coef0 of a kernel function (POLY / SIGMOID).");
AddParameter(ParameterType_Float, "classifier.svm.gamma",
"Parameter gamma of a kernel function (POLY / RBF / SIGMOID)");
SetParameterFloat("classifier.svm.gamma",1.0, false);
SetParameterDescription("classifier.svm.gamma", "Parameter gamma of a kernel function (POLY / RBF / SIGMOID).");
AddParameter(ParameterType_Float, "classifier.svm.degree", "Parameter degree of a kernel function (POLY)");
SetParameterDescription("classifier.svm.gamma",
"Parameter gamma of a kernel function (POLY / RBF / SIGMOID).");
AddParameter(ParameterType_Float, "classifier.svm.degree",
"Parameter degree of a kernel function (POLY)");
SetParameterFloat("classifier.svm.degree",1.0, false);
SetParameterDescription("classifier.svm.degree", "Parameter degree of a kernel function (POLY).");
AddParameter(ParameterType_Empty, "classifier.svm.opt", "Parameters optimization");
SetParameterDescription("classifier.svm.degree",
"Parameter degree of a kernel function (POLY).");
AddParameter(ParameterType_Empty, "classifier.svm.opt",
"Parameters optimization");
MandatoryOff("classifier.svm.opt");
SetParameterDescription("classifier.svm.opt", "SVM parameters optimization flag.\n-If set to True, then the optimal SVM parameters will be estimated. "
"Parameters are considered optimal by OpenCV when the cross-validation estimate of the test set error is minimal. "
"Finally, the SVM training process is computed 10 times with these optimal parameters over subsets corresponding to 1/10th of "
"the training samples using the k-fold cross-validation (with k = 10).\n-If set to False, the SVM classification process will be "
"computed once with the currently set input SVM parameters over the training samples.\n-Thus, even with identical input SVM "
"parameters and a similar random seed, the output SVM models will be different according to the method used (optimized or not) "
"because the samples are not identically processed within OpenCV.");
SetParameterDescription("classifier.svm.opt", "SVM parameters optimization flag.\n"
"-If set to True, then the optimal SVM parameters will be estimated. "
"Parameters are considered optimal by OpenCV when the cross-validation estimate of "
"the test set error is minimal. Finally, the SVM training process is computed "
"10 times with these optimal parameters over subsets corresponding to 1/10th of "
"the training samples using the k-fold cross-validation (with k = 10).\n-If set "
"to False, the SVM classification process will be computed once with the "
"currently set input SVM parameters over the training samples.\n-Thus, even "
"with identical input SVM parameters and a similar random seed, the output "
"SVM models will be different according to the method used (optimized or not) "
"because the samples are not identically processed within OpenCV.");
}
template <class TInputValue, class TOutputValue>
......
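A comparable sketch for the OpenCV SVM keys. CvSVMParams is the standard OpenCV 2.x structure; the values mirror the defaults set above, and the key-to-field pairing is an assumption.

#include <opencv2/ml/ml.hpp>
#include <cfloat>

// Sketch only: assumed pairing of application keys with CvSVMParams.
CvSVMParams MakeSVMParams()
{
  CvSVMParams params;
  params.svm_type    = CvSVM::C_SVC;  // model type, selected elsewhere
  params.kernel_type = CvSVM::LINEAR; // classifier.svm.k = "linear" (default)
  params.C      = 1.0; // classifier.svm.c
  params.nu     = 0.0; // classifier.svm.nu
  params.coef0  = 0.0; // classifier.svm.coef0
  params.gamma  = 1.0; // classifier.svm.gamma
  params.degree = 1.0; // classifier.svm.degree
  // classifier.svm.term / .iter / .eps (regression mode) form the termination criteria.
  params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
  return params;
}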
......@@ -31,52 +31,64 @@ void TrainVectorBase::DoInit()
{
// Common Parameters for all Learning Application
AddParameter( ParameterType_Group, "io", "Input and output data" );
SetParameterDescription( "io", "This group of parameters allows setting input and output data." );
SetParameterDescription( "io",
"This group of parameters allows setting input and output data." );
AddParameter( ParameterType_InputVectorDataList, "io.vd", "Input Vector Data" );
SetParameterDescription( "io.vd",
"Input geometries used for training (note : all geometries from the layer will be used)" );
"Input geometries used for training (note : all geometries from the layer will be used)" );
AddParameter( ParameterType_InputFilename, "io.stats", "Input XML image statistics file" );
MandatoryOff( "io.stats" );
SetParameterDescription( "io.stats", "XML file containing mean and variance of each feature." );
SetParameterDescription( "io.stats",
"XML file containing mean and variance of each feature." );
AddParameter( ParameterType_OutputFilename, "io.out", "Output model" );
SetParameterDescription( "io.out", "Output file containing the model estimated (.txt format)." );
SetParameterDescription( "io.out",
"Output file containing the model estimated (.txt format)." );
AddParameter( ParameterType_Int, "layer", "Layer Index" );
SetParameterDescription( "layer", "Index of the layer to use in the input vector file." );
SetParameterDescription( "layer",
"Index of the layer to use in the input vector file." );
MandatoryOff( "layer" );
SetDefaultParameterInt( "layer", 0 );
AddParameter(ParameterType_ListView, "feat", "Field names for training features.");
SetParameterDescription("feat","List of field names in the input vector data to be used as features for training.");
SetParameterDescription("feat",
"List of field names in the input vector data to be used as features for training.");
// Add validation data used to compute confusion matrix or contingency table
AddParameter( ParameterType_Group, "valid", "Validation data" );
SetParameterDescription( "valid", "This group of parameters defines validation data." );
SetParameterDescription( "valid",
"This group of parameters defines validation data." );
AddParameter( ParameterType_InputVectorDataList, "valid.vd", "Validation Vector Data" );
AddParameter( ParameterType_InputVectorDataList, "valid.vd",
"Validation Vector Data" );
SetParameterDescription( "valid.vd", "Geometries used for validation "
"(must contain the same fields used for training, all geometries from the layer will be used)" );
MandatoryOff( "valid.vd" );
AddParameter( ParameterType_Int, "valid.layer", "Layer Index" );
SetParameterDescription( "valid.layer", "Index of the layer to use in the validation vector file." );
SetParameterDescription( "valid.layer",
"Index of the layer to use in the validation vector file." );
MandatoryOff( "valid.layer" );
SetDefaultParameterInt( "valid.layer", 0 );
// Add class field if we used validation
AddParameter( ParameterType_ListView, "cfield", "Field containing the class integer label for supervision" );
SetParameterDescription( "cfield", "Field containing the class id for supervision. "
"The values in this field shall be cast into integers. "
"Only geometries with this field available will be taken into account." );
AddParameter( ParameterType_ListView, "cfield",
"Field containing the class integer label for supervision" );
SetParameterDescription( "cfield",
"Field containing the class id for supervision. "
"The values in this field shall be cast into integers. "
"Only geometries with this field available will be taken into account." );
SetListViewSingleSelectionMode( "cfield", true );
// Add a new parameter to compute confusion matrix / contingency table
AddParameter( ParameterType_OutputFilename, "io.confmatout", "Output confusion matrix or contingency table" );
SetParameterDescription( "io.confmatout", "Output file containing the confusion matrix or contingency table (.csv format)."
"The contingency table is output when we unsupervised algorithms is used otherwise the confusion matrix is output." );
AddParameter( ParameterType_OutputFilename, "io.confmatout",
"Output confusion matrix or contingency table" );
SetParameterDescription( "io.confmatout",
"Output file containing the confusion matrix or contingency table (.csv format)."
"The contingency table is output when we unsupervised algorithms is used otherwise the confusion matrix is output." );
MandatoryOff( "io.confmatout" );
......
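Finally, a hypothetical usage sketch showing how the common io/feat/cfield parameters declared in this DoInit() could be driven programmatically. It assumes the standard TrainVectorClassifier application built on this base and the usual OTB application engine; the input file and field names are placeholders.

#include "otbWrapperApplicationRegistry.h"

#include <string>
#include <vector>

// Sketch only: training a vector classifier through the application engine.
int main()
{
  otb::Wrapper::Application::Pointer app =
    otb::Wrapper::ApplicationRegistry::CreateApplication("TrainVectorClassifier");

  app->SetParameterStringList("io.vd", {"training.shp"});  // training geometries (placeholder)
  app->SetParameterStringList("feat", {"b0", "b1", "b2"}); // feature fields (placeholders)
  app->SetParameterStringList("cfield", {"class"});        // label field (placeholder)
  app->SetParameterString("io.out", "model.txt");          // output model file
  app->ExecuteAndWriteOutput();
  return 0;
}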