Commit e4edd3c8 authored by Ludovic Hussonnois

Merge remote-tracking branch 'remotes/origin/develop' into contingency_table

parents eb111e9d d3bd4b1f
......@@ -22,12 +22,17 @@
# Helper to perform the initial git clone and checkout.
function(_git_clone git_executable git_repository git_tag module_dir)
execute_process(
COMMAND "${git_executable}" clone "${git_repository}" "${module_dir}"
RESULT_VARIABLE error_code
OUTPUT_QUIET
ERROR_QUIET
)
set(retryCount 0)
set(error_code 1)
while(error_code AND (retryCount LESS 3))
execute_process(
COMMAND "${git_executable}" clone "${git_repository}" "${module_dir}"
RESULT_VARIABLE error_code
OUTPUT_QUIET
ERROR_QUIET
)
math(EXPR retryCount "${retryCount}+1")
endwhile()
if(error_code)
message(FATAL_ERROR "Failed to clone repository: '${git_repository}'")
endif()
......
......@@ -4,10 +4,10 @@ Classification
Pixel-based classification
--------------------------
Orfeo ToolBox ships with a set of application to perform supervised
pixel-based image classification. This framework allows to learn from
multiple images, and using several machine learning method such as
SVM, Bayes, KNN, Random Forests, Artificial Neural Network, and
Orfeo ToolBox ships with a set of applications to perform supervised or
unsupervised pixel-based image classification. This framework allows
learning from multiple images, using several machine learning methods
such as SVM, Bayes, KNN, Random Forests, Artificial Neural Networks, and
others...(see application help of ``TrainImagesClassifier`` and
``TrainVectorClassifier`` for further details about all the available
classifiers). Here is an overview of the complete workflow:
......@@ -235,7 +235,7 @@ image.
class required)
- *Mode = proportional:* For each image :math:`i` and each class :math:`c`,
:math:`N_i( c ) = \frac{M * T_i( c )}{sum_k( T_k(c)}`
:math:`N_i(c) = \frac{M \times T_i(c)}{\sum_k T_k(c)}`
- *Mode = equal:* For each image :math:`i` and each class :math:`c`,
:math:`N_i( c ) = \frac{M}{L}`
- *Mode = custom:* For each image :math:`i` and each class :math:`c`,
......@@ -347,8 +347,9 @@ using the ``TrainVectorClassifier`` application.
-feat band_0 band_1 band_2 band_3 band_4 band_5 band_6
The ``-classifier`` parameter allows choosing which machine learning
model algorithm to train. Please refer to the
``TrainVectorClassifier`` application reference documentation.
model algorithm to train. Unsupervised classification is also available;
for it, you must choose the Shark kmeans classifier.
Please refer to the ``TrainVectorClassifier`` application reference documentation.
In case of multiple sample files, you can add them to the ``-io.vd``
parameter (see `Working with several images`_ section).
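For reference, a minimal unsupervised training call could look like the
following sketch. The ``sharkkm`` classifier key and its
``classifier.sharkkm.k`` parameter are taken from the application help of
recent OTB versions and should be checked against your install; the file
names are placeholders:

::

    otbcli_TrainVectorClassifier -io.vd samples.sqlite
                                 -io.out model_kmeans.txt
                                 -feat band_0 band_1 band_2 band_3 band_4 band_5 band_6
                                 -classifier sharkkm
                                 -classifier.sharkkm.k 4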
......@@ -409,6 +410,11 @@ class too, based on the
`ConfusionMatrixCalculator <http://www.orfeo-toolbox.org/doxygen-current/classotb_1_1ConfusionMatrixCalculator.html>`_
class.
If you have made an unsupervised classification, it must be specified
to the ``ComputeConfusionMatrix`` application. In this case, a contingency table
has to be created rather than a confusion matrix. For further details,
see the ``format`` parameter in the application help of *ComputeConfusionMatrix*.
::
otbcli_ComputeConfusionMatrix -in labeled_image.tif
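For the unsupervised case, a sketch of the corresponding call could be as
follows. The ``-ref`` parameters mirror the supervised examples of this
guide, and the ``contingencytable`` value for the ``format`` parameter is an
assumption based on the description above, so check the application help;
file names are placeholders:

::

    otbcli_ComputeConfusionMatrix -in labeled_image.tif
                                  -ref vector
                                  -ref.vector.in validation_samples.shp
                                  -format contingencytable
                                  -out contingency_table.csv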
......
......@@ -45,7 +45,7 @@
#include "otbImageFileReader.h"
#include "itkUnaryFunctorImageFilter.h"
#include "itkRecursiveGaussianImageFilter.h"
#include "otbWarpImageFilter.h"
#include "itkWarpImageFilter.h"
#include "itkMeanReciprocalSquareDifferenceImageToImageMetric.h"
// Software Guide : BeginCodeSnippet
......@@ -255,7 +255,7 @@ int main(int argc, char** argv)
dfWriter->SetFileName(argv[4]);
dfWriter->Update();
typedef otb::WarpImageFilter<InputImageType, InputImageType,
typedef itk::WarpImageFilter<InputImageType, InputImageType,
DisplacementFieldType> WarperType;
WarperType::Pointer warper = WarperType::New();
......
......@@ -42,7 +42,7 @@
// Software Guide : BeginCodeSnippet
#include "otbNCCRegistrationFilter.h"
#include "itkRecursiveGaussianImageFilter.h"
#include "otbWarpImageFilter.h"
#include "itkWarpImageFilter.h"
// Software Guide : EndCodeSnippet
#include "otbImageOfVectorsToMonoChannelExtractROI.h"
......@@ -211,7 +211,7 @@ int main(int argc, char** argv)
dfWriter->SetFileName(argv[4]);
dfWriter->Update();
typedef otb::WarpImageFilter<MovingImageType, MovingImageType,
typedef itk::WarpImageFilter<MovingImageType, MovingImageType,
DisplacementFieldType> WarperType;
WarperType::Pointer warper = WarperType::New();
......
......@@ -47,7 +47,7 @@
#include "itkWindowedSincInterpolateImageFunction.h"
#include "itkGradientDescentOptimizer.h"
#include "otbBSplinesInterpolateDisplacementFieldGenerator.h"
#include "otbWarpImageFilter.h"
#include "itkWarpImageFilter.h"
// Software Guide : EndCodeSnippet
#include "otbImageFileReader.h"
......@@ -414,7 +414,7 @@ int main(int argc, char* argv[])
// Software Guide : BeginCodeSnippet
typedef otb::WarpImageFilter<ImageType, ImageType,
typedef itk::WarpImageFilter<ImageType, ImageType,
DisplacementFieldType> ImageWarperType;
// Software Guide : EndCodeSnippet
......
......@@ -35,7 +35,7 @@
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
#include "otbAssymmetricFusionOfLineDetectorImageFilter.h"
#include "otbAsymmetricFusionOfLineDetectorImageFilter.h"
// Software Guide : EndCodeSnippet
#include "otbImage.h"
......@@ -85,7 +85,7 @@ int main(int argc, char * argv[])
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef otb::AssymmetricFusionOfLineDetectorImageFilter<InternalImageType,
typedef otb::AsymmetricFusionOfLineDetectorImageFilter<InternalImageType,
InternalImageType>
FilterType;
// Software Guide : EndCodeSnippet
......
......@@ -33,23 +33,8 @@ add_executable(SOMExample SOMExample.cxx)
target_link_libraries(SOMExample ${OTB_LIBRARIES})
if(OTBLibSVM_LOADED)
add_executable(SVMImageClassificationExample SVMImageClassificationExample.cxx)
target_link_libraries(SVMImageClassificationExample ${OTB_LIBRARIES})
add_executable(SVMImageEstimatorClassificationMultiExample SVMImageEstimatorClassificationMultiExample.cxx)
target_link_libraries(SVMImageEstimatorClassificationMultiExample ${OTB_LIBRARIES})
add_executable(SVMImageModelEstimatorExample SVMImageModelEstimatorExample.cxx)
target_link_libraries(SVMImageModelEstimatorExample ${OTB_LIBRARIES})
add_executable(SVMPointSetClassificationExample SVMPointSetClassificationExample.cxx)
target_link_libraries(SVMPointSetClassificationExample ${OTB_LIBRARIES})
add_executable(SVMPointSetExample SVMPointSetExample.cxx)
target_link_libraries(SVMPointSetExample ${OTB_LIBRARIES})
add_executable(SVMPointSetModelEstimatorExample SVMPointSetModelEstimatorExample.cxx)
target_link_libraries(SVMPointSetModelEstimatorExample ${OTB_LIBRARIES})
endif()
if(OTBOpenCV_LOADED)
......
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Software Guide : BeginCommandLineArgs
// INPUTS: {ROI_QB_MUL_1.png}, {ROI_mask.png}
// OUTPUTS: {svm_image_model.svn}
// Software Guide : EndCommandLineArgs
// Software Guide : BeginLatex
// This example illustrates the use of the
// \doxygen{otb}{SVMImageModelEstimator} class. This class allows the
// estimation of an SVM model (supervised learning) from a feature
// image and an image of labels. In this example, we will train an SVM
// to separate water from non-water pixels by using the RGB
// values only. The images used for this example are shown in
// figure~\ref{fig:SVMROIS}.
// \begin{figure}
// \center
// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1.eps}
// \includegraphics[width=0.45\textwidth]{ROI_mask.eps}
// \itkcaption[SVM Image Model Estimation]{Images used for the
// estimation of the SVM model. Left: RGB image. Right: image of labels.}
// \label{fig:SVMROIS}
// \end{figure}
// The first thing to do is include the header file for the class.
//
// Software Guide : EndLatex
#include "itkMacro.h"
#include "otbImage.h"
#include "otbVectorImage.h"
#include <iostream>
// Software Guide : BeginCodeSnippet
#include "otbSVMImageModelEstimator.h"
// Software Guide : EndCodeSnippet
#include "otbImageFileReader.h"
int main(int itkNotUsed(argc), char* argv[])
{
const char* inputImageFileName = argv[1];
const char* trainingImageFileName = argv[2];
const char* outputModelFileName = argv[3];
// Software Guide : BeginLatex
//
// We define the types for the input and training images. Even though the
// input image is an RGB image, we can read it as a 3-component
// vector image. This simplifies the interfacing with OTB's SVM
// framework.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef unsigned char InputPixelType;
const unsigned int Dimension = 2;
typedef otb::VectorImage<InputPixelType, Dimension> InputImageType;
typedef otb::Image<InputPixelType, Dimension> TrainingImageType;
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// The \doxygen{otb}{SVMImageModelEstimator} class is templated over
// the input (features) and the training (labels) images.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef otb::SVMImageModelEstimator<InputImageType,
TrainingImageType> EstimatorType;
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// As usual, we define the readers for the images.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef otb::ImageFileReader<InputImageType> InputReaderType;
typedef otb::ImageFileReader<TrainingImageType> TrainingReaderType;
InputReaderType::Pointer inputReader = InputReaderType::New();
TrainingReaderType::Pointer trainingReader = TrainingReaderType::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We read the images. It is worth noting that, in order to ensure
// pipeline coherence, the outputs of the objects which precede the
// model estimator in the pipeline must be up to date, so we call
// the corresponding \code{Update} methods.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
inputReader->SetFileName(inputImageFileName);
trainingReader->SetFileName(trainingImageFileName);
inputReader->Update();
trainingReader->Update();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We can now instantiate the model estimator and set its parameters.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
EstimatorType::Pointer svmEstimator = EstimatorType::New();
svmEstimator->SetInputImage(inputReader->GetOutput());
svmEstimator->SetTrainingImage(trainingReader->GetOutput());
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// The model estimation procedure is triggered by calling the
// estimator's \code{Update} method.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
svmEstimator->Update();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// Finally, the estimated model can be saved to a file for later use.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
svmEstimator->SaveModel(outputModelFileName);
// Software Guide : EndCodeSnippet
return EXIT_SUCCESS;
}
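// Based on the CommandLineArgs block at the top of this example, and
// assuming the build produces a binary named after the source file (the
// actual target name may differ), this example would presumably be run as:
//
//   SVMImageModelEstimatorExample ROI_QB_MUL_1.png ROI_mask.png svm_image_model.svn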
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Software Guide : BeginCommandLineArgs
// INPUTS: {svm_model.svn}
// OUTPUTS:
// Software Guide : EndCommandLineArgs
#include "itkMacro.h"
#include <iostream>
#include <cstdlib>
// Software Guide : BeginLatex
// This example illustrates the use of the
// \doxygen{otb}{SVMClassifier} class for performing SVM
// classification on point sets.
// The first thing to do is include the header file for the
// class. Since the \doxygen{otb}{SVMClassifier} takes
// \doxygen{itk}{ListSample}s as input, the class
// \doxygen{itk}{PointSetToListSampleAdaptor} is needed.
//
// We start by including the needed header files.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
#include "itkPointSetToListSampleAdaptor.h"
#include "otbSVMClassifier.h"
// Software Guide : EndCodeSnippet
int main(int itkNotUsed(argc), char* argv[])
{
// Software Guide : BeginLatex
//
// In the framework of supervised learning and classification, we will
// always use feature vectors for the characterization of the
// classes. On the other hand, the class labels are scalar
// values. Here, we start by defining the type of the features as the
// \code{PixelType}, which will be used to define the feature
// \code{VectorType}. We also declare the type for the labels.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef float InputPixelType;
typedef std::vector<InputPixelType> InputVectorType;
typedef int LabelPixelType;
// Software Guide : EndCodeSnippet
const unsigned int Dimension = 2;
// Software Guide : BeginLatex
//
// We can now proceed to define the point sets used for storing the
// features and the labels.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef itk::PointSet<InputVectorType, Dimension> MeasurePointSetType;
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We will need to get access to the data stored in the point sets, so
// we define the appropriate types for the points and the points containers
// used by the point sets (see the section \ref{sec:PointSetSection}
// for more information on how to use point sets).
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef MeasurePointSetType::PointType MeasurePointType;
typedef MeasurePointSetType::PointsContainer MeasurePointsContainer;
MeasurePointSetType::Pointer tPSet = MeasurePointSetType::New();
MeasurePointsContainer::Pointer tCont = MeasurePointsContainer::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We now need to build the test set for the SVM. In this
// simple example, we will build an SVM which classifies points depending on
// which side of the line $x=y$ they are located. We start by
// generating 100 random points.
//
// Software Guide : EndLatex
srand(0);
unsigned int pointId;
// Software Guide : BeginCodeSnippet
int lowest = 0;
int range = 1000;
for (pointId = 0; pointId < 100; pointId++)
{
MeasurePointType tP;
int x_coord = lowest + static_cast<int>(range * (rand() / (RAND_MAX + 1.0)));
int y_coord = lowest + static_cast<int>(range * (rand() / (RAND_MAX + 1.0)));
std::cout << "coords : " << x_coord << " " << y_coord << std::endl;
tP[0] = x_coord;
tP[1] = y_coord;
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We push the features into the vector after a normalization step, which is
// useful for SVM convergence.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
InputVectorType measure;
measure.push_back(static_cast<InputPixelType>((x_coord * 1.0 -
lowest) / range));
measure.push_back(static_cast<InputPixelType>((y_coord * 1.0 -
lowest) / range));
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// And we insert the points in the points container.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
tCont->InsertElement(pointId, tP);
tPSet->SetPointData(pointId, measure);
}
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// After the loop, we set the points container to the point set.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
tPSet->SetPoints(tCont);
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// Once the point set is ready, we must transform it into a sample which
// is compatible with the classification framework. We will use a
// \doxygen{itk}{Statistics::PointSetToListSampleAdaptor} for this
// task. This class is templated over the point set type used for
// storing the measures.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef itk::Statistics::PointSetToListSampleAdaptor<MeasurePointSetType>
SampleType;
SampleType::Pointer sample = SampleType::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// After instantiation, we can set the point set as an input of our
// sample adaptor.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
sample->SetPointSet(tPSet);
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// Now, we need to declare the SVM model which is to be used by the
// classifier. The SVM model is templated over the type of value used
// for the measures and the type of pixel used for the labels.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef otb::SVMModel<SampleType::MeasurementVectorType::ValueType,
LabelPixelType> ModelType;
ModelType::Pointer model = ModelType::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// After instantiation, we can load a model saved to a file (see
// section \ref{sec:LearningWithPointSets} for an example of model
// estimation and storage to a file).
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
model->LoadModel(argv[1]);
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We have now all the elements to create a classifier. The classifier
// is templated over the sample type (the type of the data to be
// classified) and the label type (the type of the output of the classifier).
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
typedef otb::SVMClassifier<SampleType, LabelPixelType> ClassifierType;
ClassifierType::Pointer classifier = ClassifierType::New();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We set the classifier parameters: the number of classes, the SVM model,
// and the sample data. We then trigger the classification process by
// calling the \code{Update} method.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
int numberOfClasses = model->GetNumberOfClasses();
classifier->SetNumberOfClasses(numberOfClasses);
classifier->SetModel(model);
classifier->SetInput(sample.GetPointer());
classifier->Update();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// After the classification step, we usually want to get the
// results. The classifier provides its output in the form of a sample
// list. This list supports the classical STL iterators.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
ClassifierType::OutputType* membershipSample =
classifier->GetOutput();
ClassifierType::OutputType::ConstIterator m_iter =
membershipSample->Begin();
ClassifierType::OutputType::ConstIterator m_last =
membershipSample->End();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We will iterate through the list, get the labels and compute the
// classification error.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
double error = 0.0;
pointId = 0;
while (m_iter != m_last)
{
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// We get the label for each point.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
ClassifierType::ClassLabelType label = m_iter.GetClassLabel();
// Software Guide : EndCodeSnippet
// Software Guide : BeginLatex
//
// And we compare it to the corresponding label of the test set.
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
InputVectorType measure;
tPSet->GetPointData(pointId, &measure);
ClassifierType::ClassLabelType expectedLabel;
if (measure[0] < measure[1]) expectedLabel = -1;
else expectedLabel = 1;
double dist = fabs(measure[0] - measure[1]);