Commit a43ca34c authored by Guillaume Pasero

DOC: doxygen warnings

parent c71b76e4
@@ -31,7 +31,7 @@ class Layer;
class GeometriesSet;
} // otb namespace
/**\defgroup GeometriesFilters
/**\defgroup GeometriesFilters Filters of geometries sets
* \ingroup gGeometry Filters
* Filters of geometries sets.
*/
......
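Note: doxygen's \defgroup command expects a label followed by a title and warns when the title is missing, which is what the hunk above supplies. A minimal sketch of the pattern, with a hypothetical group name:

    /** \defgroup MyFilters Filters that do something useful
     * \ingroup gSomeParentGroup
     * Longer description of the group goes here.
     */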
@@ -180,7 +180,6 @@ struct FieldCopyTransformation
}
/**
* In-place transformation: does nothing.
* \param[in] inoutFeature \c Feature to change.
* \throw Nothing
*/
void fieldsTransform(ogr::Feature const& itkNotUsed(inoutFeature)) const
@@ -189,8 +188,8 @@ struct FieldCopyTransformation
}
/**
* By-Copy transformation: copies all fields.
* \param[in] inFeature input \c Feature
* \param[in,out] outFeature output \c Feature
* \param [in] inFeature input \c Feature
* \param [in,out] outFeature output \c Feature
*
* \throw itk::ExceptionObject if the fields cannot be copied.
*/
@@ -199,8 +198,8 @@ struct FieldCopyTransformation
/**
* Defines the fields in the destination layer.
* The default action is to copy all fields from one layer to another.
* \param[in] source source \c Layer
* \param[in,out] dest destination \c Layer
* \param [in] source source \c Layer
* \param [in,out] dest destination \c Layer
* \throw itk::ExceptionObject in case the operation can't succeed.
*/
void DefineFields(ogr::Layer const& source, ogr::Layer & dest) const;
......
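The hunks above also show the \param convention this commit settles on: \param [in] name description, with each name matching an argument of the signature (doxygen warns about documented parameters that do not exist, hence the removal of the line for the itkNotUsed argument in the first hunk). A simplified sketch of the resulting documented declaration:

    /**
     * By-Copy transformation: copies all fields.
     * \param [in] inFeature input \c Feature
     * \param [in,out] outFeature output \c Feature
     * \throw itk::ExceptionObject if the fields cannot be copied.
     */
    void fieldsTransform(ogr::Feature const& inFeature, ogr::Feature & outFeature) const;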
@@ -121,7 +121,7 @@ private:
* StatisticsAttributesLabelMapFilter on each channel independently
*
* The feature name is constructed as:
* 'STATS' + '::' + 'Band' + #BandIndex + '::' + StatisticName
* 'STATS' + '::' + 'Band' + band_index + '::' + statistic_name
*
* The ReducedAttributesSet flag allows telling the internal
* statistics filter to compute only the main attributes (mean, variance, skewness and kurtosis).
......
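A small self-contained sketch of the naming scheme described above (the helper name is illustrative, not part of the filter's API):

    #include <sstream>
    #include <string>

    // Builds e.g. "STATS::Band1::Mean" from a band index and a statistic name.
    std::string MakeFeatureName(unsigned int bandIndex, const std::string& statisticName)
    {
      std::ostringstream oss;
      oss << "STATS" << "::" << "Band" << bandIndex << "::" << statisticName;
      return oss.str();
    }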
@@ -88,7 +88,7 @@ public:
virtual ParametersType& GetParameters(void) const;
/**
* Set the Fixed Parameters
* \param The Fixed parameters of the transform.
* \param param The fixed parameters of the transform.
*/
virtual void SetFixedParameters( const ParametersType & param)
{ this->m_FixedParameters = param; }
......
@@ -90,7 +90,7 @@ public:
/**
* Set the Fixed Parameters
* \param The Fixed parameters of the transform.
* \param param The fixed parameters of the transform.
*/
virtual void SetFixedParameters( const ParametersType & param)
{ this->m_FixedParameters = param; }
......
@@ -254,7 +254,7 @@ public:
/**
* Copy the field list from a DataNode
* \param datanode where to get the keywordlist to copy.
* \param dataNode datanode where to get the keywordlist to copy.
*/
void CopyFieldList(const DataNode * dataNode);
......
@@ -106,7 +106,7 @@ public:
*
* Orientation is expressed in degrees in the range [0, 360] with a precision of 10 degrees.
*
* \example FeatureExtraction/SIFTExample.cxx
* \example Patented/SIFTExample.cxx
*
*
* \ingroup OTBDescriptors
......
@@ -41,7 +41,7 @@ namespace otb
*
* \sa ImageToSIFTKeyPointSetFilter
*
* \example FeatureExtraction/SIFTFastExample.cxx
* \example Patented/SIFTFastExample.cxx
*
* \ingroup OTBDescriptors
*/
......
@@ -40,7 +40,7 @@ namespace otb
*
* This filter is based on the mathematical parser library muParserX.
* The built in functions and operators list is available at:
* \url{http:*articles.beltoforion.de/article.php?a=muparserx}.
* http://articles.beltoforion.de/article.php?a=muparserx.
*
* In order to use this filter, at least one input image is to be
* set. An associated variable name can be specified or not by using
......
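As a usage sketch for the filter documented above (assuming OTB's usual BandMathX interface, i.e. SetNthInput with an optional variable name and SetExpression; the expression itself is illustrative):

    #include "otbVectorImage.h"
    #include "otbBandMathXImageFilter.h"

    typedef otb::VectorImage<double, 2>          ImageType;
    typedef otb::BandMathXImageFilter<ImageType> FilterType;

    void Example(ImageType* input)
    {
      FilterType::Pointer filter = FilterType::New();
      filter->SetNthInput(0, input, "im1");    // bind the first input to variable "im1"
      filter->SetExpression("im1b1 + im1b2");  // muParserX expression: sum of bands 1 and 2
      filter->Update();
    }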
@@ -79,7 +79,7 @@ public:
/** Setters/Getters to the threshold WeightTrimRate.
* A threshold between 0 and 1 used to save computational time.
* Samples with summary weight \leq 1 - WeightTrimRate do not participate in the next iteration of training.
* Samples with summary weight \f$ w \leq 1 - WeightTrimRate \f$ do not participate in the next iteration of training.
* Set this parameter to 0 to turn off this functionality.
* Default is 0.95
* \see http://docs.opencv.org/modules/ml/doc/boosting.html#cvboostparams-cvboostparams
......
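The fix above illustrates the rule applied throughout this commit: bare TeX commands such as \leq are unknown doxygen commands and trigger warnings, so formulas are wrapped in \f$ ... \f$ to be rendered as inline math. A sketch on a hypothetical member:

    /** Threshold test: samples with weight \f$ w \leq 1 - t \f$, where
     *  \f$ t \in [0, 1] \f$ is the trim rate, are skipped during training. */
    double m_WeightTrimRate;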
@@ -90,7 +90,7 @@ public:
itkGetMacro(UseSurrogates, bool);
itkSetMacro(UseSurrogates, bool);
/** Cluster possible values of a categorical variable into K \leq max_categories clusters to find
/** Cluster possible values of a categorical variable into \f$ K \leq MaxCategories \f$ clusters to find
* a suboptimal split. If a discrete variable, on which the training procedure tries to make a split,
* takes more than max_categories values, the precise best subset estimation may take a very long time
* because the algorithm is exponential. Instead, many decision tree engines (including ML) try to find
......
@@ -55,14 +55,14 @@ public:
/** Setters/Getters to the number of neighbors to use
* Default is 32
* see @http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
* \see http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
*/
itkGetMacro(K, int);
itkSetMacro(K, int);
/** Setters/Getters to IsRegression flag
* Default is False
* see @http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
* \see http://docs.opencv.org/modules/ml/doc/k_nearest_neighbors.html
*/
itkGetMacro(IsRegression, bool);
itkSetMacro(IsRegression, bool);
......
@@ -117,14 +117,14 @@ public:
itkGetMacro(BackPropMomentScale, double);
itkSetMacro(BackPropMomentScale, double);
/** Initial value \Delta_0 of update-values \Delta_{ij} in RPROP method.
/** Initial value \f$ \Delta_0 \f$ of update-values \f$ \Delta_{ij} \f$ in RPROP method.
* Default is 0.1
* \see http://docs.opencv.org/modules/ml/doc/neural_networks.html
*/
itkGetMacro(RegPropDW0, double);
itkSetMacro(RegPropDW0, double);
/** Update-values lower limit \Delta_{min} in RPROP method.
/** Update-values lower limit \f$ \Delta_{min} \f$ in RPROP method.
* It must be positive. Default is FLT_EPSILON
* \see http://docs.opencv.org/modules/ml/doc/neural_networks.html
*/
......
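Both RPROP fixes use doxygen's inline math form; for reference, \f$ ... \f$ renders inline with the text, while \f[ ... \f] produces a display block. A sketch on a hypothetical member:

    /** Update-value bounds in RPROP:
     *  inline: \f$ \Delta_{min} \leq \Delta_{ij} \leq \Delta_{max} \f$
     *  display:
     *  \f[ \Delta_{min} \leq \Delta_{ij} \leq \Delta_{max} \f]
     */
    double m_RegPropDWMin;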
@@ -83,55 +83,21 @@ public:
//@}
// Setters of RT parameters (documentation taken from OpenCV doxygen 2.4)
/* the depth of the tree. A low value will likely underfit and conversely a
* high value will likely overfit. The optimal value can be obtained using cross
* validation or other suitable methods. */
itkGetMacro(MaxDepth, int);
itkSetMacro(MaxDepth, int);
/* minimum samples required at a leaf node for it to be split. A reasonable
* value is a small percentage of the total data e.g. 1%. */
itkGetMacro(MinSampleCount, int);
itkSetMacro(MinSampleCount, int);
/* Termination criteria for regression trees. If all absolute differences
* between an estimated value in a node and values of train samples in this node
* are less than this parameter then the node will not be split */
itkGetMacro(RegressionAccuracy, double);
itkSetMacro(RegressionAccuracy, double);
itkGetMacro(ComputeSurrogateSplit, bool);
itkSetMacro(ComputeSurrogateSplit, bool);
/* Cluster possible values of a categorical variable into K \leq
* max_categories clusters to find a suboptimal split. If a discrete variable,
* on which the training procedure tries to make a split, takes more than
* max_categories values, the precise best subset estimation may take a very
* long time because the algorithm is exponential. Instead, many decision
* tree engines (including ML) try to find a sub-optimal split in this case by
* clustering all the samples into max_categories clusters, that is, some
* categories are merged together. The clustering is applied only in n>2-class
* classification problems for categorical variables with N > max_categories
* possible values. In case of regression and 2-class classification the
* optimal split can be found efficiently without employing clustering, thus
* the parameter is not used in these cases.
*/
itkGetMacro(MaxNumberOfCategories, int);
itkSetMacro(MaxNumberOfCategories, int);
/* The array of a priori class probabilities, sorted by the class label
* value. The parameter can be used to tune the decision tree preferences toward
* a certain class. For example, if you want to detect some rare anomaly
* occurrence, the training base will likely contain much more normal cases than
* anomalies, so a very good classification performance will be achieved just by
* considering every case as normal. To avoid this, the priors can be specified,
* where the anomaly probability is artificially increased (up to 0.5 or even
* greater), so the weight of the misclassified anomalies becomes much bigger,
* and the tree is adjusted properly. You can also think about this parameter as
* weights of prediction categories which determine relative weights that you
* give to misclassification. That is, if the weight of the first category is 1
* and the weight of the second category is 10, then each mistake in predicting
* the second category is equivalent to making 10 mistakes in predicting the
first category. */
std::vector<float> GetPriors() const
{
return m_Priors;
@@ -141,29 +107,22 @@ public:
{
m_Priors = priors;
}
/* If true then variable importance will be calculated and then it can be
retrieved by CvRTrees::get_var_importance(). */
itkGetMacro(CalculateVariableImportance, bool);
itkSetMacro(CalculateVariableImportance, bool);
/* The size of the randomly selected subset of features at each tree node and
* that are used to find the best split(s). If you set it to 0 then the size will
be set to the square root of the total number of features. */
itkGetMacro(MaxNumberOfVariables, int);
itkSetMacro(MaxNumberOfVariables, int);
/* The maximum number of trees in the forest (surprise, surprise). Typically
* the more trees you have the better the accuracy. However, the improvement in
* accuracy generally diminishes and asymptotes past a certain number of
* trees. Also keep in mind that the number of trees increases the prediction time
linearly. */
itkGetMacro(MaxNumberOfTrees, int);
itkSetMacro(MaxNumberOfTrees, int);
/* Sufficient accuracy (OOB error) */
itkGetMacro(ForestAccuracy, float);
itkSetMacro(ForestAccuracy, float);
/* The type of the termination criteria */
itkGetMacro(TerminationCriteria, int);
itkSetMacro(TerminationCriteria, int);
/* Perform regression instead of classification */
itkGetMacro(RegressionMode, bool);
itkSetMacro(RegressionMode, bool);
@@ -193,17 +152,66 @@ private:
void operator =(const Self&); //purposely not implemented
CvRTrees * m_RFModel;
/** The depth of the tree. A low value will likely underfit and conversely a
* high value will likely overfit. The optimal value can be obtained using cross
* validation or other suitable methods. */
int m_MaxDepth;
/** minimum samples required at a leaf node for it to be split. A reasonable
* value is a small percentage of the total data e.g. 1%. */
int m_MinSampleCount;
/** Termination criteria for regression trees. If all absolute differences
* between an estimated value in a node and values of train samples in this node
* are less than this parameter then the node will not be split */
float m_RegressionAccuracy;
bool m_ComputeSurrogateSplit;
/** Cluster possible values of a categorical variable into
* \f$ K \leq MaxCategories \f$
* clusters to find a suboptimal split. If a discrete variable,
* on which the training procedure tries to make a split, takes more than
* max_categories values, the precise best subset estimation may take a very
* long time because the algorithm is exponential. Instead, many decision
* tree engines (including ML) try to find a sub-optimal split in this case by
* clustering all the samples into max_categories clusters, that is, some
* categories are merged together. The clustering is applied only in n>2-class
* classification problems for categorical variables with N > max_categories
* possible values. In case of regression and 2-class classification the
* optimal split can be found efficiently without employing clustering, thus
* the parameter is not used in these cases.
*/
int m_MaxNumberOfCategories;
/** The array of a priori class probabilities, sorted by the class label
* value. The parameter can be used to tune the decision tree preferences toward
* a certain class. For example, if you want to detect some rare anomaly
* occurrence, the training base will likely contain much more normal cases than
* anomalies, so a very good classification performance will be achieved just by
* considering every case as normal. To avoid this, the priors can be specified,
* where the anomaly probability is artificially increased (up to 0.5 or even
* greater), so the weight of the misclassified anomalies becomes much bigger,
* and the tree is adjusted properly. You can also think about this parameter as
* weights of prediction categories which determine relative weights that you
* give to misclassification. That is, if the weight of the first category is 1
* and the weight of the second category is 10, then each mistake in predicting
* the second category is equivalent to making 10 mistakes in predicting the
* first category. */
std::vector<float> m_Priors;
/** If true then variable importance will be calculated and then it can be
* retrieved by CvRTrees::get_var_importance(). */
bool m_CalculateVariableImportance;
/** The size of the randomly selected subset of features at each tree node and
* that are used to find the best split(s). If you set it to 0 then the size will
* be set to the square root of the total number of features. */
int m_MaxNumberOfVariables;
/** The maximum number of trees in the forest (surprise, surprise). Typically
* the more trees you have the better the accuracy. However, the improvement in
* accuracy generally diminishes and asymptotes past a certain number of
* trees. Also keep in mind that the number of trees increases the prediction time
* linearly. */
int m_MaxNumberOfTrees;
/** Sufficient accuracy (OOB error) */
float m_ForestAccuracy;
/** The type of the termination criteria */
int m_TerminationCriteria;
/** Perform regression instead of classification */
bool m_RegressionMode;
};
} // end namespace otb
......
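The pattern behind this last file's change: the removed comments used plain /* ... */ blocks, which doxygen ignores, while the new /** ... */ blocks are attached to the member variable declarations that immediately follow them, so each parameter is now documented once, on the member itself. A compilable sketch of the resulting layout, with plain accessors standing in for the ITK macros and an illustrative class name:

    class RandomForestsParamsSketch
    {
    public:
      int  GetMaxNumberOfTrees() const { return m_MaxNumberOfTrees; }
      void SetMaxNumberOfTrees(int n)  { m_MaxNumberOfTrees = n; }

    private:
      /** The maximum number of trees in the forest. */
      int m_MaxNumberOfTrees = 100;
    };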