diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt
index 1dbb019d597eeacea397d03e89e99b683d7a4522..23ae79fadf236a7c7938983ba2e81e760e166d7e 100644
--- a/Examples/CMakeLists.txt
+++ b/Examples/CMakeLists.txt
@@ -12,4 +12,5 @@ SUBDIRS(
   ChangeDetection
   Visu
   Learning
+  Classification
 )
diff --git a/Examples/ChangeDetection/CorrelChDet.cxx b/Examples/ChangeDetection/CorrelChDet.cxx
index a990403bf06ee5fefa6393f793499cddb3db200f..d1ae71059e1c391dc19bf144b65f36e42e55823a 100644
--- a/Examples/ChangeDetection/CorrelChDet.cxx
+++ b/Examples/ChangeDetection/CorrelChDet.cxx
@@ -20,75 +20,185 @@
 #include "otbStreamingImageFileWriter.h"
 #include "otbImage.h"
 #include "itkShiftScaleImageFilter.h"
-#include "otbCorrelationChangeDetector.h"
 #include "otbCommandProgressUpdate.h"
 
+//  Software Guide : BeginCommandLineArgs
+//    INPUTS: {ERSBefore.png}, {ERSAfter.png}
+//    OUTPUTS: {CorrChDet.tif}
+//    15
+//  Software Guide : EndCommandLineArgs
+
+
+
+//  Software Guide : BeginLatex
+// This example illustrates the class
+// \doxygen{otb::CorrelationChangeDetector} for detecting changes
+// between pairs of images. This filter computes the correlation coefficient in
+// the neighborhood of each pixel of the pair of images to be compared. This
+// example will use the images shown in
+// Figure~\ref{fig:CORRCHDETINIM}. These correspond to two ERS acquisitions before and during a flood.
+// \begin{figure}
+// \center
+// \includegraphics[width=0.35\textwidth]{ERSBefore.eps}
+// \includegraphics[width=0.35\textwidth]{ERSAfter.eps}
+// \itkcaption[ERS Images for Change Detection]{Images used for the
+// change detection. Left: Before the flood. Right: During the flood.}
+// \label{fig:CORRCHDETINIM}
+// \end{figure}
+//
+// We start by including the corresponding header file.
+//
+//  Software Guide : EndLatex 
+
+//  Software Guide : BeginCodeSnippet
+#include "otbCorrelationChangeDetector.h"
+//  Software Guide : EndCodeSnippet
+
 int main(int argc, char* argv[] ) 
 {
 
   if( argc < 5 )
     {
     std::cerr << "Usage: " << std::endl;
-    std::cerr << argv[0] << " inputImageFile1 inputImageFile2  radius outputImageFile " << std::endl;
+    std::cerr << argv[0] << " inputImageFile1 inputImageFile2  outputImageFile radius" << std::endl;
     return -1;
     }
 
   // Define the dimension of the images
   const unsigned int Dimension = 2;
 
-  // Declare the types of the images
+  //  Software Guide : BeginLatex
+  // We start by declaring the types for the two input images, the
+  // change image and the image to be stored in a file for visualization.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   typedef float InternalPixelType;
   typedef unsigned char OutputPixelType;
   typedef otb::Image<InternalPixelType, Dimension>  InputImageType1;
   typedef otb::Image<InternalPixelType, Dimension>  InputImageType2;
   typedef otb::Image<InternalPixelType, Dimension>  ChangeImageType;
   typedef otb::Image<OutputPixelType, Dimension>  OutputImageType;
-
+  //  Software Guide : EndCodeSnippet
+  
+  //  Software Guide : BeginLatex
+  //
+  //  We can now declare the types for the readers. Since the images
+  //  can be very large, we will force the pipeline to use
+  //  streaming. For this purpose, the file writer will be
+  //  streamed. This is achieved by using the
+  //  \doxygen{otb::StreamingImageFileWriter} class.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   typedef otb::ImageFileReader< InputImageType1 >  ReaderType1;
   typedef otb::ImageFileReader< InputImageType2 >  ReaderType2;
   typedef otb::StreamingImageFileWriter< OutputImageType >  WriterType;
+  //  Software Guide : EndCodeSnippet
+  
+  //  Software Guide : BeginLatex
+  //
+  //  The change detector will give a response which is normalized
+  //  between 0 and 1. Before
+  //  saving the image to a file in, for instance, PNG format, we will
+  //  rescale the results of the change detection in order to use all
+  //  the output pixel type range of values.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   typedef itk::ShiftScaleImageFilter< ChangeImageType,
                                             OutputImageType > RescalerType; 
-
-
+  //  Software Guide : EndCodeSnippet
   
-  // Declare the type for the filter
+  //  Software Guide : BeginLatex
+  //
+  //  The \doxygen{otb::CorrelationChangeDetector} is templated over
+  //  the types of the two input images and the type of the generated change
+  //  image.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   typedef otb::CorrelationChangeDetector<
                                 InputImageType1,
                                 InputImageType2,
                                 ChangeImageType  >       FilterType;
-
+  //  Software Guide : EndCodeSnippet
   
+  //  Software Guide : BeginLatex
+  //
+  //  The different elements of the pipeline can now be instantiated.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   ReaderType1::Pointer reader1 = ReaderType1::New();
   ReaderType2::Pointer reader2 = ReaderType2::New();
   WriterType::Pointer writer = WriterType::New();
   FilterType::Pointer   filter = FilterType::New();
   RescalerType::Pointer rescaler = RescalerType::New();
-
+  //  Software Guide : EndCodeSnippet
   const char * inputFilename1  = argv[1];
   const char * inputFilename2  = argv[2];
-  const char * outputFilename = argv[4];
-
+  const char * outputFilename = argv[3];
+  //  Software Guide : BeginLatex
+  //
+  //  We set the parameters of the different elements of the pipeline.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   reader1->SetFileName( inputFilename1  );
   reader2->SetFileName( inputFilename2  );
   writer->SetFileName( outputFilename );
   
   float scale = itk::NumericTraits< OutputPixelType >::max();
   rescaler->SetScale( scale );
+  //  Software Guide : EndCodeSnippet
+  
+  //  Software Guide : BeginLatex
+  //
+  //  The only parameter for this change detector is the radius of
+  //  the window used for computing the correlation coefficient.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
+  filter->SetRadius( atoi(argv[4]) );
+  //  Software Guide : EndCodeSnippet
+  
+  //  Software Guide : BeginLatex
+  //
+  //  We build the pipeline by plugging all the elements together.
+  // 
+  //  Software Guide : EndLatex 
 
+  //  Software Guide : BeginCodeSnippet
   filter->SetInput1( reader1->GetOutput() ); 
   filter->SetInput2( reader2->GetOutput() );
-  filter->SetRadius( atoi(argv[3]) );
-
   rescaler->SetInput( filter->GetOutput() );
   writer->SetInput( rescaler->GetOutput() );
-
-
+  //  Software Guide : EndCodeSnippet
+
+  //  Software Guide : BeginLatex
+  //
+  //  Since the processing time of large images can be long, it is
+  //  interesting to monitor the evolution of the computation. In
+  //  order to do so, the change detectors can use the
+  //  command/observer design pattern. This is easily done by
+  //  attaching an observer to the filter.
+  // 
+  //  Software Guide : EndLatex 
+
+  //  Software Guide : BeginCodeSnippet
   typedef otb::CommandProgressUpdate<FilterType> CommandType;
 
   CommandType::Pointer observer = CommandType::New();
   filter->AddObserver(itk::ProgressEvent(), observer);
-
+  //  Software Guide : EndCodeSnippet
 
   
   try 
@@ -102,6 +212,18 @@ int main(int argc, char* argv[] )
     return -1;
     } 
 
+//  Software Guide : BeginLatex
+// Figure \ref{fig:RESCORRCHDET} shows the result of the change
+// detection by local correlation.
+// \begin{figure}
+// \center
+// \includegraphics[width=0.35\textwidth]{CorrChDet.eps}
+// \itkcaption[Correlation Change Detection Results]{Result of the
+// correlation change detector}  
+// \label{fig:RESCORRCHDET}
+// \end{figure}
+//  Software Guide : EndLatex
+  
   
   return 0;
 
diff --git a/Examples/Classification/BayesianPluginClassifier.cxx b/Examples/Classification/BayesianPluginClassifier.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..de878d0c3e8a61bbaeba38697749fd04e8337f15
--- /dev/null
+++ b/Examples/Classification/BayesianPluginClassifier.cxx
@@ -0,0 +1,344 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: BayesianPluginClassifier.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/11/19 16:31:51 $
+  Version:   $Revision: 1.20 $
+
+  Copyright (c) Insight Software Consortium. All rights reserved.
+  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+// Software Guide : BeginLatex
+//
+// \index{Statistics!Bayesian plugin classifier}
+// \index{itk::Statistics::SampleClassifier}
+// \index{itk::Statistics::GaussianDensityFunction}
+// \index{itk::Statistics::NormalVariateGenerator}
+//
+// In this example, we present a system that places measurement vectors into
+// two Gaussian classes. The Figure~\ref{fig:BayesianPluginClassifier} shows
+// all the components of the classifier system and the data flow. This system
+// differs from the previous k-means clustering algorithms in several
+// ways. The biggest difference is that this classifier uses the
+// \subdoxygen{itk::Statistics}{GaussianDensityFunction}s as membership functions
+// instead of the \subdoxygen{itk::Statistics}{EuclideanDistance}. Since the
+// membership function is different, the membership function requires a
+// different set of parameters, mean vectors and covariance matrices. We
+// choose the \subdoxygen{itk::Statistics}{MeanCalculator} (sample mean) and the
+// \subdoxygen{itk::Statistics}{CovarianceCalculator} (sample covariance) for the
+// estimation algorithms of the two parameters. If we want more robust
+// estimation algorithm, we can replace these estimation algorithms with more
+// alternatives without changing other components in the classifier system.
+// 
+// It is a bad idea to use the same sample for test and training
+// (parameter estimation) of the parameters. However, for simplicity, in
+// this example, we use a sample for test and training.
+//
+// \begin{figure}
+//   \centering
+//   \includegraphics[width=0.9\textwidth]{BayesianPluginClassifier.eps}
+//   \itkcaption[Bayesian plug-in classifier for two Gaussian classes]{Bayesian
+//   plug-in classifier for two Gaussian classes.}
+//  \protect\label{fig:BayesianPluginClassifier}
+// \end{figure}
+//
+// We use the \subdoxygen{itk::Statistics}{ListSample} as the sample (test
+// and training). The \doxygen{itk::Vector} is our measurement vector
+// class. To store measurement vectors into two separate sample
+// containers, we use the \subdoxygen{itk::Statistics}{Subsample} objects. 
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+#include "itkVector.h"
+#include "itkListSample.h"
+#include "itkSubsample.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// The following two files provide the parameter estimation algorithms.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkMeanCalculator.h"
+#include "itkCovarianceCalculator.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// The following files define the components required by ITK statistical
+// classification framework: the decision rule, the membership
+// function, and the classifier. 
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkMaximumRatioDecisionRule.h"
+#include "itkGaussianDensityFunction.h"
+#include "itkSampleClassifier.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// We will fill the sample with random variables from two normal 
+// distributions using the \subdoxygen{itk::Statistics}{NormalVariateGenerator}.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkNormalVariateGenerator.h"
+// Software Guide : EndCodeSnippet
+
+int main( int,  char *[])
+{
+  // Software Guide : BeginLatex
+  //
+  // Since the NormalVariateGenerator class only supports 1-D, we define our
+  // measurement vector type as a one component vector. We then, create a
+  // ListSample object for data inputs.
+  // 
+  // We also create two Subsample objects that will store
+  // the measurement vectors in \code{sample} into two separate
+  // sample containers. Each Subsample object stores only the
+  // measurement vectors belonging to a single class. This class sample
+  // will be used by the parameter estimation algorithms.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Vector< double, 1 > MeasurementVectorType;
+  typedef itk::Statistics::ListSample< MeasurementVectorType > SampleType;
+  SampleType::Pointer sample = SampleType::New();
+  sample->SetMeasurementVectorSize( 1 ); // length of measurement vectors
+                                         // in the sample.
+
+  typedef itk::Statistics::Subsample< SampleType > ClassSampleType;
+  std::vector< ClassSampleType::Pointer > classSamples;
+  for ( unsigned int i = 0 ; i < 2 ; ++i )
+    {
+    classSamples.push_back( ClassSampleType::New() );
+    classSamples[i]->SetSample( sample );
+    }
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The following code snippet creates a NormalVariateGenerator
+  // object. Since the random variable generator returns values according to
+  // the standard normal distribution (the mean is zero, and the standard
+  // deviation is one) before pushing random values into the \code{sample},
+  // we change the mean and standard deviation. We want two normal (Gaussian)
+  // distribution data. We have two for loops. Each for loop uses different
+  // mean and standard deviation. Before we fill the \code{sample} with the
+  // second distribution data, we call \code{Initialize(random seed)} method,
+  // to recreate the pool of random variables in the
+  // \code{normalGenerator}. In the second for loop, we fill the two class
+  // samples with measurement vectors using the \code{AddInstance()} method.
+  //
+  // To see the probability density plots from the two distributions,
+  // refer to Figure~\ref{fig:TwoNormalDensityFunctionPlot}.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::NormalVariateGenerator NormalGeneratorType;
+  NormalGeneratorType::Pointer normalGenerator = NormalGeneratorType::New();
+
+  normalGenerator->Initialize( 101 );
+
+  MeasurementVectorType mv;
+  double mean = 100;
+  double standardDeviation = 30;
+  SampleType::InstanceIdentifier id = 0UL;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv.Fill( (normalGenerator->GetVariate() * standardDeviation ) + mean);
+    sample->PushBack( mv );
+    classSamples[0]->AddInstance( id );
+    ++id;
+    }
+
+  normalGenerator->Initialize( 3024 );
+  mean = 200;
+  standardDeviation = 30;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv.Fill( (normalGenerator->GetVariate() * standardDeviation ) + mean);
+    sample->PushBack( mv );
+    classSamples[1]->AddInstance( id );
+    ++id;
+    }
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // In the following code snippet, notice that the template argument for the
+  // MeanCalculator and CovarianceCalculator is \code{ClassSampleType} (i.e.,
+  // type of Subsample) instead of SampleType (i.e., type of ListSample). This
+  // is because the parameter estimation algorithms are applied to the class
+  // sample.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::MeanCalculator< ClassSampleType > MeanEstimatorType;
+  typedef itk::Statistics::CovarianceCalculator< ClassSampleType >
+    CovarianceEstimatorType;
+  
+  std::vector< MeanEstimatorType::Pointer > meanEstimators;
+  std::vector< CovarianceEstimatorType::Pointer > covarianceEstimators;
+
+  for ( unsigned int i = 0 ; i < 2 ; ++i )
+    {
+    meanEstimators.push_back( MeanEstimatorType::New() );
+    meanEstimators[i]->SetInputSample( classSamples[i] );
+    meanEstimators[i]->Update();
+    
+    covarianceEstimators.push_back( CovarianceEstimatorType::New() );
+    covarianceEstimators[i]->SetInputSample( classSamples[i] );
+    covarianceEstimators[i]->SetMean( meanEstimators[i]->GetOutput() );
+    covarianceEstimators[i]->Update();
+    }
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // We print out the estimated parameters.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  for ( unsigned int i = 0 ; i < 2 ; ++i )
+    {
+    std::cout << "class[" << i << "] " << std::endl;
+    std::cout << "    estimated mean : " 
+              << *(meanEstimators[i]->GetOutput()) 
+              << "    covariance matrix : " 
+              << *(covarianceEstimators[i]->GetOutput()) << std::endl;
+    }
+  // Software Guide : EndCodeSnippet
+  
+  // Software Guide : BeginLatex
+  //
+  // After creating a SampleClassifier object and a
+  // MaximumRatioDecisionRule object, we plug in the
+  // \code{decisionRule} and the \code{sample} to the classifier. Then,
+  // we specify the number of classes that will be considered using
+  // the \code{SetNumberOfClasses()} method. 
+  //
+  // The MaximumRatioDecisionRule requires a vector of \emph{a
+  // priori} probability values. Such \emph{a priori} probability will
+  // be the $P(\omega_{i})$ of the following variation of the Bayes
+  // decision rule: 
+  // \begin{equation}
+  //   \textrm{Decide } \omega_{i} \textrm{ if }
+  //   \frac{p(\overrightarrow{x}|\omega_{i})}
+  //        {p(\overrightarrow{x}|\omega_{j})}
+  // > \frac{P(\omega_{j})}{P(\omega_{i})} \textrm{ for all } j \not= i
+  //   \label{eq:bayes2}
+  // \end{equation}
+  // 
+  // The remainder of the code snippet shows how to use user-specified class
+  // labels. The classification result will be stored in a
+  // MembershipSample object, and for each measurement vector, its
+  // class label will be one of the two class labels, 100 and 200
+  // (\code{unsigned int}).
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::GaussianDensityFunction< MeasurementVectorType > 
+    MembershipFunctionType;
+  typedef itk::MaximumRatioDecisionRule DecisionRuleType;
+  DecisionRuleType::Pointer decisionRule = DecisionRuleType::New();
+
+  DecisionRuleType::APrioriVectorType aPrioris;
+  aPrioris.push_back( classSamples[0]->GetTotalFrequency() 
+                      / sample->GetTotalFrequency() ) ; 
+  aPrioris.push_back( classSamples[1]->GetTotalFrequency() 
+                      / sample->GetTotalFrequency() ) ; 
+  decisionRule->SetAPriori( aPrioris );
+
+  typedef itk::Statistics::SampleClassifier< SampleType > ClassifierType;
+  ClassifierType::Pointer classifier = ClassifierType::New();
+
+  classifier->SetDecisionRule( (itk::DecisionRuleBase::Pointer) decisionRule);
+  classifier->SetSample( sample );
+  classifier->SetNumberOfClasses( 2 );
+
+  std::vector< unsigned int > classLabels;
+  classLabels.resize( 2 );
+  classLabels[0] = 100;
+  classLabels[1] = 200;
+  classifier->SetMembershipFunctionClassLabels(classLabels);
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The \code{classifier} is almost ready to perform the classification
+  // except that it needs two membership functions that represent the two
+  // clusters.
+  //
+  // In this example, we can imagine that the two clusters are modeled by two
+  // Euclidean distance functions. The distance function (model) has only one
+  // parameter, the mean (centroid) set by the \code{SetOrigin()} method. To
+  // plug-in two distance functions, we call the
+  // \code{AddMembershipFunction()} method. Then invocation of the
+  // \code{Update()} method will perform the classification.
+  // 
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  std::vector< MembershipFunctionType::Pointer > membershipFunctions;
+  for ( unsigned int i = 0 ; i < 2 ; i++ ) 
+    {
+    membershipFunctions.push_back(MembershipFunctionType::New());
+    membershipFunctions[i]->SetMean( meanEstimators[i]->GetOutput() );
+    membershipFunctions[i]->
+      SetCovariance( covarianceEstimators[i]->GetOutput() );
+    classifier->AddMembershipFunction(membershipFunctions[i].GetPointer());
+    }
+
+  classifier->Update();
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The following code snippet prints out pairs of a measurement vector and
+  // its class label in the \code{sample}.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  ClassifierType::OutputType* membershipSample = classifier->GetOutput();
+  ClassifierType::OutputType::ConstIterator iter = membershipSample->Begin();
+
+  while ( iter != membershipSample->End() )
+    {
+    std::cout << "measurement vector = " << iter.GetMeasurementVector()
+              << "class label = " << iter.GetClassLabel() << std::endl;
+    ++iter;
+    }
+  // Software Guide : EndCodeSnippet
+
+  return 0;
+}
+
+
+
+
+
+
+
diff --git a/Examples/Classification/CMakeLists.txt b/Examples/Classification/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6ef3c16775444ddd7bb7d53f9a84db125a62a29b
--- /dev/null
+++ b/Examples/Classification/CMakeLists.txt
@@ -0,0 +1,108 @@
+PROJECT(StatisticsExamples)
+INCLUDE_REGULAR_EXPRESSION("^.*$")
+
+ADD_EXECUTABLE(KdTreeBasedKMeansClustering KdTreeBasedKMeansClustering.cxx )
+TARGET_LINK_LIBRARIES(KdTreeBasedKMeansClustering OTBCommon ITKCommon ITKStatistics)
+
+ADD_EXECUTABLE(ScalarImageKmeansClassifier ScalarImageKmeansClassifier.cxx )
+TARGET_LINK_LIBRARIES(ScalarImageKmeansClassifier OTBIO OTBCommon ITKIO ITKCommon )
+
+ADD_EXECUTABLE(ScalarImageKmeansModelEstimator ScalarImageKmeansModelEstimator.cxx )
+TARGET_LINK_LIBRARIES(ScalarImageKmeansModelEstimator  OTBIO OTBCommon ITKIO ITKCommon )
+
+ADD_EXECUTABLE(BayesianPluginClassifier BayesianPluginClassifier.cxx )
+TARGET_LINK_LIBRARIES(BayesianPluginClassifier OTBIO OTBCommon ITKCommon ITKStatistics)
+
+ADD_EXECUTABLE(ExpectationMaximizationMixtureModelEstimator ExpectationMaximizationMixtureModelEstimator.cxx )
+TARGET_LINK_LIBRARIES(ExpectationMaximizationMixtureModelEstimator OTBIO OTBCommon ITKCommon ITKStatistics)
+
+ADD_EXECUTABLE(ScalarImageMarkovRandomField1 ScalarImageMarkovRandomField1.cxx )
+TARGET_LINK_LIBRARIES(ScalarImageMarkovRandomField1 OTBIO OTBCommon ITKIO ITKCommon )
+
+
+#ADD_EXECUTABLE( BayesianClassifierInitializer BayesianClassifierInitializer.cxx )
+#TARGET_LINK_LIBRARIES(BayesianClassifierInitializer ITKCommon ITKStatistics ITKIO)
+
+#ADD_EXECUTABLE( BayesianClassifier BayesianClassifier.cxx )
+#TARGET_LINK_LIBRARIES(BayesianClassifier ITKCommon ITKStatistics ITKIO)
+
+#ADD_EXECUTABLE(EuclideanDistance EuclideanDistance.cxx )
+#TARGET_LINK_LIBRARIES(EuclideanDistance ITKCommon)
+
+
+#ADD_EXECUTABLE(GaussianDensityFunction GaussianDensityFunction.cxx )
+#TARGET_LINK_LIBRARIES(GaussianDensityFunction ITKCommon)
+
+#ADD_EXECUTABLE(Histogram Histogram.cxx )
+#TARGET_LINK_LIBRARIES(Histogram ITKCommon ITKStatistics)
+
+#ADD_EXECUTABLE(ImageToListAdaptor ImageToListAdaptor.cxx )
+#TARGET_LINK_LIBRARIES(ImageToListAdaptor ITKCommon)
+
+#ADD_EXECUTABLE(KdTree KdTree.cxx )
+#TARGET_LINK_LIBRARIES(KdTree ITKCommon)
+
+
+#ADD_EXECUTABLE(ListSample ListSample.cxx )
+#TARGET_LINK_LIBRARIES(ListSample ITKCommon)
+
+#ADD_EXECUTABLE(ListSampleToHistogramFilter ListSampleToHistogramFilter.cxx )
+#TARGET_LINK_LIBRARIES(ListSampleToHistogramFilter ITKCommon ITKStatistics)
+
+#ADD_EXECUTABLE(ListSampleToHistogramGenerator ListSampleToHistogramGenerator.cxx )
+#TARGET_LINK_LIBRARIES(ListSampleToHistogramGenerator ITKCommon ITKStatistics)
+
+#ADD_EXECUTABLE(MaximumDecisionRule MaximumDecisionRule.cxx )
+#TARGET_LINK_LIBRARIES(MaximumDecisionRule ITKCommon)
+
+#ADD_EXECUTABLE(MaximumRatioDecisionRule MaximumRatioDecisionRule.cxx )
+#TARGET_LINK_LIBRARIES(MaximumRatioDecisionRule ITKCommon)
+
+#ADD_EXECUTABLE(MembershipSample MembershipSample.cxx )
+#TARGET_LINK_LIBRARIES(MembershipSample ITKCommon)
+
+#ADD_EXECUTABLE(MinimumDecisionRule MinimumDecisionRule.cxx )
+#TARGET_LINK_LIBRARIES(MinimumDecisionRule ITKCommon)
+
+#ADD_EXECUTABLE(NeighborhoodSampler NeighborhoodSampler.cxx )
+#TARGET_LINK_LIBRARIES(NeighborhoodSampler ITKCommon)
+
+#ADD_EXECUTABLE(NormalVariateGenerator NormalVariateGenerator.cxx )
+#TARGET_LINK_LIBRARIES(NormalVariateGenerator ITKCommon ITKStatistics)
+
+#ADD_EXECUTABLE(PointSetToListAdaptor PointSetToListAdaptor.cxx )
+#TARGET_LINK_LIBRARIES(PointSetToListAdaptor ITKCommon)
+
+#ADD_EXECUTABLE(NormalVariateGenerator NormalVariateGenerator.cxx )
+#TARGET_LINK_LIBRARIES(NormalVariateGenerator ITKCommon)
+
+#ADD_EXECUTABLE(SampleStatistics SampleStatistics.cxx )
+#TARGET_LINK_LIBRARIES(SampleStatistics ITKCommon)
+
+#ADD_EXECUTABLE(SampleToHistogramProjectionFilter SampleToHistogramProjectionFilter.cxx )
+#TARGET_LINK_LIBRARIES(SampleToHistogramProjectionFilter ITKCommon
+#                                                        ITKStatistics)
+
+#ADD_EXECUTABLE(SampleSorting SampleSorting.cxx )
+#TARGET_LINK_LIBRARIES(SampleSorting ITKCommon)
+
+#ADD_EXECUTABLE(WeightedSampleStatistics WeightedSampleStatistics.cxx )
+#TARGET_LINK_LIBRARIES(WeightedSampleStatistics ITKCommon)
+
+#ADD_EXECUTABLE(ImageHistogram1 ImageHistogram1.cxx )
+#TARGET_LINK_LIBRARIES(ImageHistogram1 ITKIO ITKCommon ITKStatistics )
+
+#ADD_EXECUTABLE(ImageHistogram2 ImageHistogram2.cxx )
+#TARGET_LINK_LIBRARIES(ImageHistogram2 ITKIO ITKCommon ITKStatistics )
+
+#ADD_EXECUTABLE(ImageHistogram3 ImageHistogram3.cxx )
+#TARGET_LINK_LIBRARIES(ImageHistogram3 ITKIO ITKCommon ITKStatistics )
+
+#ADD_EXECUTABLE(ImageHistogram4 ImageHistogram4.cxx )
+#TARGET_LINK_LIBRARIES(ImageHistogram4 ITKIO ITKCommon ITKStatistics )
+
+#ADD_EXECUTABLE(ImageEntropy1 ImageEntropy1.cxx )
+#TARGET_LINK_LIBRARIES(ImageEntropy1 ITKIO ITKCommon ITKStatistics)
+
+
+
diff --git a/Examples/Classification/ExpectationMaximizationMixtureModelEstimator.cxx b/Examples/Classification/ExpectationMaximizationMixtureModelEstimator.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..603d25e76937dda210685638f823f589d97b176e
--- /dev/null
+++ b/Examples/Classification/ExpectationMaximizationMixtureModelEstimator.cxx
@@ -0,0 +1,268 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: ExpectationMaximizationMixtureModelEstimator.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/11/19 16:31:51 $
+  Version:   $Revision: 1.13 $
+
+     Copyright (c) Insight Software Consortium. All rights reserved.
+     See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+// Software Guide : BeginLatex
+//
+// \index{Statistics!Mixture model estimation}
+// \index{Statistics!Expectation maximization}
+//
+// \index{itk::Statistics::Gaussian\-Mixture\-Model\-Component}
+// \index{itk::Statistics::Expectation\-Maximization\-Mixture\-Model\-Estimator}
+//
+// In this example, we present ITK's implementation of the expectation
+// maximization (EM) process to generate parameter estimates for a two
+// Gaussian component mixture model.
+//
+// The Bayesian plug-in classifier example (see Section
+// \ref{sec:BayesianPluginClassifier}) used two Gaussian probability
+// density functions (PDF) to model two Gaussian distribution classes (two
+// models for two classes). However, in some cases, we want to model a
+// distribution as a mixture of several different
+// distributions. Therefore, the probability density function ($p(x)$)
+// of a mixture model can be stated as follows :
+//
+// \begin{equation}
+//   p(x) = \sum^{c}_{i=0}\alpha_{i}f_{i}(x)
+// \end{equation}
+// where $i$ is the index of the component,
+//       $c$ is the number of components, 
+//       $\alpha_{i}$ is the proportion of the component, 
+//       and $f_{i}$ is the probability density function of the component.
+//
+// Now the task is to find the parameters (the component PDF's
+// parameters and the proportion values) that maximize the likelihood of
+// the parameters. If we know which component a measurement vector
+// belongs to, the solution to this problem is easy.
+// However, we don't know the membership of each measurement
+// vector. Therefore, we use the expectation of membership instead of
+// the exact membership. The EM process splits into two steps:
+// \begin{enumerate}
+//    \item{ E step: calculate the expected membership values for each
+// measurement vector to each class.}
+//    \item{ M step: find the next parameter sets that maximize the
+// likelihood with the expected membership values and the current set of
+// parameters.}
+// \end{enumerate}
+//
+// The E step is basically a step that calculates the \emph{a posteriori}
+// probability for each measurement vector.
+//
+// The M step is dependent on the type of each PDF. Most of
+// distributions belonging to exponential family such as Poisson,
+// Binomial, Exponential, and Normal distributions have analytical
+// solutions for updating the parameter set. The
+// \subdoxygen{itk::Statistics}{ExpectationMaximizationMixtureModelEstimator}
+// class assumes that the components are of such types.
+//
+// In the following example we use the \subdoxygen{itk::Statistics}{ListSample} as
+// the sample (test and training). The \subdoxygen{itk::Vector} is our measurement
+// vector class. To store measurement vectors into two separate sample
+// containers, we use the \subdoxygen{itk::Statistics}{Subsample} objects.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+#include "itkVector.h"
+#include "itkListSample.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// The following two files provide us the parameter estimation algorithms.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkGaussianMixtureModelComponent.h"
+#include "itkExpectationMaximizationMixtureModelEstimator.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// We will fill the sample with random variables from two normal 
+// distribution using the \subdoxygen{itk::Statistics}{NormalVariateGenerator}.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkNormalVariateGenerator.h"
+// Software Guide : EndCodeSnippet
+
+int main()
+{
+  // Software Guide : BeginLatex
+  //
+  // Since the NormalVariateGenerator class only supports 1-D, we
+  // define our measurement vector type as a one component vector. We
+  // then, create a ListSample object for data inputs.
+  // 
+  // We also create two Subsample objects that will store the
+  // measurement vectors in the \code{sample} into two separate sample
+  // containers. Each Subsample object stores only the
+  // measurement vectors belonging to a single class. This
+  // \textit{class sample} will be used by the parameter estimation
+  // algorithms.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  unsigned int numberOfClasses = 2;
+  typedef itk::Vector< double, 1 > MeasurementVectorType;
+  typedef itk::Statistics::ListSample< MeasurementVectorType > SampleType;
+  SampleType::Pointer sample = SampleType::New();
+  sample->SetMeasurementVectorSize( 1 ); // length of measurement vectors
+                                         // in the sample.
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The following code snippet creates a NormalVariateGenerator object.
+  // Since the random variable generator returns values according to the
+  // standard normal distribution (the mean is zero, and the standard
+  // deviation is one) before pushing random values into the \code{sample},
+  // we change the mean and standard deviation. We want two normal (Gaussian)
+  // distribution data. We have two for loops. Each for loop uses different
+  // mean and standard deviation. Before we fill the \code{sample} with the
+  // second distribution data, we call \code{Initialize()} method to recreate
+  // the pool of random variables in the \code{normalGenerator}. In the
+  // second for loop, we fill the two class samples with measurement vectors
+  // using the \code{AddInstance()} method.
+  //
+  // To see the probability density plots from the two distributions,
+  // refer to Figure~\ref{fig:TwoNormalDensityFunctionPlot}.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::NormalVariateGenerator NormalGeneratorType;
+  NormalGeneratorType::Pointer normalGenerator = NormalGeneratorType::New();
+
+  normalGenerator->Initialize( 101 );
+
+  MeasurementVectorType mv;
+  double mean = 100;
+  double standardDeviation = 30;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv[0] = ( normalGenerator->GetVariate() * standardDeviation ) + mean;
+    sample->PushBack( mv );
+    }
+
+  normalGenerator->Initialize( 3024 );
+  mean = 200;
+  standardDeviation = 30;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv[0] = ( normalGenerator->GetVariate() * standardDeviation ) + mean;
+    sample->PushBack( mv );
+    }
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // In the following code snippet notice that the template argument
+  // for the MeanCalculator and CovarianceCalculator is
+  // \code{ClassSampleType} (i.e., type of Subsample) instead of
+  // \code{SampleType} (i.e., type of ListSample). This is because the
+  // parameter estimation algorithms are applied to the class sample. 
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Array< double > ParametersType;
+  ParametersType params( 2 );
+
+  std::vector< ParametersType > initialParameters( numberOfClasses );
+  params[0] = 110.0;
+  params[1] = 800.0;
+  initialParameters[0] = params;
+
+  params[0] = 210.0;
+  params[1] = 850.0;
+  initialParameters[1] = params;
+
+  typedef itk::Statistics::GaussianMixtureModelComponent< SampleType > 
+    ComponentType;
+
+  std::vector< ComponentType::Pointer > components;
+  for ( unsigned int i = 0 ; i < numberOfClasses ; i++ )
+    {
+    components.push_back( ComponentType::New() );
+    (components[i])->SetSample( sample );
+    (components[i])->SetParameters( initialParameters[i] );
+    }
+  // Software Guide : EndCodeSnippet
+  
+  // Software Guide : BeginLatex
+  //
+  // We run the estimator.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::ExpectationMaximizationMixtureModelEstimator< 
+                           SampleType > EstimatorType;
+  EstimatorType::Pointer estimator = EstimatorType::New();
+
+  estimator->SetSample( sample );
+  estimator->SetMaximumIteration( 200 );
+
+  itk::Array< double > initialProportions(numberOfClasses);
+  initialProportions[0] = 0.5;
+  initialProportions[1] = 0.5;
+
+  estimator->SetInitialProportions( initialProportions );
+  
+  for ( unsigned int i = 0 ; i < numberOfClasses ; i++)
+    {
+    estimator->AddComponent( (ComponentType::Superclass*)
+                             (components[i]).GetPointer() );
+    }
+  
+  estimator->Update();
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // We then print out the estimated parameters.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  for ( unsigned int i = 0 ; i < numberOfClasses ; i++ )
+    {
+    std::cout << "Cluster[" << i << "]" << std::endl;
+    std::cout << "    Parameters:" << std::endl;
+    std::cout << "         " << (components[i])->GetFullParameters() 
+              << std::endl;
+    std::cout << "    Proportion: ";
+    std::cout << "         " << (*estimator->GetProportions())[i] << std::endl;
+    }
+  // Software Guide : EndCodeSnippet
+  
+  return 0;
+}
+
+
+
+
+
+
+
diff --git a/Examples/Classification/KdTreeBasedKMeansClustering.cxx b/Examples/Classification/KdTreeBasedKMeansClustering.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..f1ded1b2167d96bc3fa86b898b998497dc0666aa
--- /dev/null
+++ b/Examples/Classification/KdTreeBasedKMeansClustering.cxx
@@ -0,0 +1,383 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: KdTreeBasedKMeansClustering.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/11/19 16:31:51 $
+  Version:   $Revision: 1.19 $
+
+  Copyright (c) Insight Software Consortium. All rights reserved.
+  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+// Software Guide : BeginLatex
+// \index{Statistics!k-means clustering (using k-d tree)}
+//
+// \index{itk::Statistics::KdTree\-Based\-Kmeans\-Estimator}
+//
+// K-means clustering is a popular clustering algorithm because it is simple
+// and usually converges to a reasonable solution. The k-means algorithm
+// works as follows:
+//
+// \begin{enumerate}
+//   \item{Obtains the initial k means input from the user.} 
+//   \item{Assigns each measurement vector in a sample container to its
+// closest mean among the k number of means (i.e., update the membership of
+// each measurement vectors to the nearest of the k clusters).}
+//   \item{Calculates each cluster's mean from the newly assigned
+// measurement vectors (updates the centroid (mean) of k clusters).}
+//   \item{Repeats step 2 and step 3 until it meets the termination
+// criteria.}
+// \end{enumerate}
+//
+// The most common termination criteria is that if there is no
+// measurement vector that changes its cluster membership from the
+// previous iteration, then the algorithm stops.
+//
+// The \subdoxygen{itk::Statistics}{KdTreeBasedKmeansEstimator} is a variation of
+// this logic. The k-means clustering algorithm is computationally very
+// expensive because it has to recalculate the mean at each iteration. To
+// update the mean values, we have to calculate the distance between k means
+// and each and every measurement vector. To reduce the computational burden,
+// the KdTreeBasedKmeansEstimator uses a special data structure: the
+// k-d tree (\subdoxygen{itk::Statistics}{KdTree}) with additional
+// information. The additional information includes the number and the vector
+// sum of measurement vectors under each node under the tree architecture.
+//
+// With such additional information and the k-d tree data structure,
+// we can reduce the computational cost of the distance calculation
+// and means. Instead of calculating each measurement vector and k
+// means, we can simply compare each node of the k-d tree and the k
+// means. This idea of utilizing a k-d tree can be found in multiple
+// articles \cite{Alsabti1998} \cite{Pelleg1999}
+// \cite{Kanungo2000}. Our implementation of this scheme follows the
+// article by Kanungo et al. \cite{Kanungo2000}.
+//
+// We use the \subdoxygen{itk::Statistics}{ListSample} as the input sample, the
+// \doxygen{itk::Vector} as the measurement vector. The following code
+// snippet includes their header files.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+#include "itkVector.h"
+#include "itkListSample.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// Since this k-means algorithm requires a \subdoxygen{itk::Statistics}{KdTree}
+// object as an input, we include the KdTree class header file. As mentioned
+// above, we need a k-d tree with the vector sum and the number of
+// measurement vectors. Therefore we use the
+// \subdoxygen{itk::Statistics}{WeightedCentroidKdTreeGenerator} instead of the
+// \subdoxygen{itk::Statistics}{KdTreeGenerator} that generates a k-d tree without
+// such additional information.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkKdTree.h"
+#include "itkWeightedCentroidKdTreeGenerator.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// The KdTreeBasedKmeansEstimator class is the implementation of the
+// k-means algorithm. It does not create k clusters. Instead, it
+// returns the mean estimates for the k clusters.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkKdTreeBasedKmeansEstimator.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// To generate the clusters, we must create k instances of
+// \subdoxygen{itk::Statistics}{EuclideanDistance} function as the membership
+// functions for each cluster and plug that---along with a sample---into an
+// \subdoxygen{itk::Statistics}{SampleClassifier} object to get a
+// \subdoxygen{itk::Statistics}{MembershipSample} that stores pairs of measurement
+// vectors and their associated class labels (k labels).
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkMinimumDecisionRule.h"
+#include "itkEuclideanDistance.h"
+#include "itkSampleClassifier.h"
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// We will fill the sample with random variables from two normal
+// distribution using the \subdoxygen{itk::Statistics}{NormalVariateGenerator}.
+//
+// Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "itkNormalVariateGenerator.h"
+// Software Guide : EndCodeSnippet
+
+int main()
+{
+  // Software Guide : BeginLatex
+  //
+  // Since the \code{NormalVariateGenerator} class only supports 1-D, we
+  // define our measurement vector type as one component vector. We
+  // then, create a \code{ListSample} object for data inputs. Each 
+  // measurement vector is of length 1. We set this using the
+  // \code{SetMeasurementVectorSize()} method.
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Vector< double, 1 > MeasurementVectorType;
+  typedef itk::Statistics::ListSample< MeasurementVectorType > SampleType;
+  SampleType::Pointer sample = SampleType::New();
+  sample->SetMeasurementVectorSize( 1 );
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The following code snippet creates a NormalVariateGenerator
+  // object. Since the random variable generator returns values
+  // according to the standard normal distribution (The mean is zero,
+  // and the standard deviation is one), before pushing random values
+  // into the \code{sample}, we change the mean and standard
+  // deviation. We want two normal (Gaussian) distribution data. We have
+  // two for loops. Each \code{for} loop uses different mean and standard
+  // deviation. Before we fill the \code{sample} with the second
+  // distribution data, we call \code{Initialize(random seed)} method,
+  // to recreate the pool of random variables in the
+  // \code{normalGenerator}.
+  //
+  // To see the probability density plots from the two distributions,
+  // refer to Figure~\ref{fig:TwoNormalDensityFunctionPlot}.
+  //
+  // \begin{figure} 
+  //   \center
+  //   \includegraphics[width=0.8\textwidth]{TwoNormalDensityFunctionPlot.eps}
+  //   \itkcaption[Two normal distributions plot]{Two normal distributions' probability density plot
+  // (The means are 100 and 200, and the standard deviation is 30 )}
+  //  \protect\label{fig:TwoNormalDensityFunctionPlot}
+  // \end{figure}
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::NormalVariateGenerator NormalGeneratorType;
+  NormalGeneratorType::Pointer normalGenerator = NormalGeneratorType::New();
+
+  normalGenerator->Initialize( 101 );
+
+  MeasurementVectorType mv;
+  double mean = 100;
+  double standardDeviation = 30;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv[0] = ( normalGenerator->GetVariate() * standardDeviation ) + mean;
+    sample->PushBack( mv );
+    }
+
+  normalGenerator->Initialize( 3024 );
+  mean = 200;
+  standardDeviation = 30;
+  for ( unsigned int i = 0 ; i < 100 ; ++i )
+    {
+    mv[0] = ( normalGenerator->GetVariate() * standardDeviation ) + mean;
+    sample->PushBack( mv );
+    }
+  // Software Guide : EndCodeSnippet
+
+
+  // Software Guide : BeginLatex
+  //
+  // We create a k-d tree. %To see the details on the k-d tree generation, see
+  // %the Section~\ref{sec:KdTree}.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::WeightedCentroidKdTreeGenerator< SampleType > 
+    TreeGeneratorType;
+  TreeGeneratorType::Pointer treeGenerator = TreeGeneratorType::New();
+  
+  treeGenerator->SetSample( sample );
+  treeGenerator->SetBucketSize( 16 );
+  treeGenerator->Update();
+  // Software Guide : EndCodeSnippet
+
+
+  // Software Guide : BeginLatex
+  //
+  // Once we have the k-d tree, it is a simple procedure to produce k
+  // mean estimates. 
+  //
+  // We create the KdTreeBasedKmeansEstimator. Then, we provide the initial
+  // mean values using the \code{SetParameters()}. Since we are dealing with
+  // two normal distributions in a 1-D space, the size of the mean value array
+  // is two. The first element is the first mean value, and the second is the
+  // second mean value. If we used two normal distributions in a 2-D space,
+  // the size of array would be four, and the first two elements would be the
+  // two components of the first normal distribution's mean vector. We
+  // plug-in the k-d tree using the \code{SetKdTree()}.
+  //
+  // The remaining two methods specify the termination condition. The
+  // estimation process stops when the number of iterations reaches the
+  // maximum iteration value set by the \code{SetMaximumIteration()}, or the
+  // distances between the newly calculated mean (centroid) values and
+  // previous ones are within the threshold set by the
+  // \code{SetCentroidPositionChangesThreshold()}. The final step is
+  // to call the \code{StartOptimization()} method.
+  //
+  // The for loop will print out the mean estimates from the estimation
+  // process. 
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef TreeGeneratorType::KdTreeType TreeType;
+  typedef itk::Statistics::KdTreeBasedKmeansEstimator<TreeType> EstimatorType;
+  EstimatorType::Pointer estimator = EstimatorType::New();
+
+  EstimatorType::ParametersType initialMeans(2);
+  initialMeans[0] = 0.0;
+  initialMeans[1] = 0.0;
+
+  estimator->SetParameters( initialMeans );
+  estimator->SetKdTree( treeGenerator->GetOutput() );
+  estimator->SetMaximumIteration( 200 );
+  estimator->SetCentroidPositionChangesThreshold(0.0);
+  estimator->StartOptimization();
+
+  EstimatorType::ParametersType estimatedMeans = estimator->GetParameters();
+
+  for ( unsigned int i = 0 ; i < 2 ; ++i )
+    {
+    std::cout << "cluster[" << i << "] " << std::endl;
+    std::cout << "    estimated mean : " << estimatedMeans[i] << std::endl;
+    }
+  // Software Guide : EndCodeSnippet
+
+
+  // Software Guide : BeginLatex
+  //
+  // If we are only interested in finding the mean estimates, we might
+  // stop. However, to illustrate how a classifier can be formed using
+  // the statistical classification framework. We go a little bit
+  // further in this example.
+  //
+  // Since the k-means algorithm is a minimum distance classifier using
+  // the estimated k means and the measurement vectors, we use the
+  // EuclideanDistance class as membership functions. Our choice
+  // for the decision rule is the
+  // \subdoxygen{itk::Statistics}{MinimumDecisionRule} that returns the
+  // index of the membership functions that have the smallest value for
+  // a measurement vector.
+  //
+  // After creating a SampleClassifier object and a MinimumDecisionRule
+  // object, we plug-in the \code{decisionRule} and the \code{sample} to the
+  // classifier. Then, we must specify the number of classes that will be
+  // considered using the \code{SetNumberOfClasses()} method.
+  //
+  // The remainder of the following code snippet shows how to use
+  // user-specified class labels. The classification result will be stored in
+  // a MembershipSample object, and for each measurement vector, its
+  // class label will be one of the two class labels, 100 and 200
+  // (\code{unsigned int}).
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::EuclideanDistance< MeasurementVectorType > 
+    MembershipFunctionType;
+  typedef itk::MinimumDecisionRule DecisionRuleType;
+  DecisionRuleType::Pointer decisionRule = DecisionRuleType::New();
+  
+  typedef itk::Statistics::SampleClassifier< SampleType > ClassifierType;
+  ClassifierType::Pointer classifier = ClassifierType::New();
+
+  classifier->SetDecisionRule( (itk::DecisionRuleBase::Pointer) decisionRule);
+  classifier->SetSample( sample );
+  classifier->SetNumberOfClasses( 2 );
+
+  std::vector< unsigned int > classLabels;
+  classLabels.resize( 2 );
+  classLabels[0] = 100;
+  classLabels[1] = 200;
+
+  classifier->SetMembershipFunctionClassLabels( classLabels );
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The \code{classifier} is almost ready to do the classification
+  // process except that it needs two membership functions that
+  // represent the two clusters respectively.
+  //
+  // In this example, the two clusters are modeled by two Euclidean distance
+  // functions. The distance function (model) has only one parameter, its mean
+  // (centroid) set by the \code{SetOrigin()} method. To plug-in two distance
+  // functions, we call the \code{AddMembershipFunction()} method. Then
+  // invocation of the \code{Update()} method will perform the
+  // classification.
+  // 
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  std::vector< MembershipFunctionType::Pointer > membershipFunctions;
+  MembershipFunctionType::OriginType origin( sample->GetMeasurementVectorSize() );
+  int index = 0;
+  for ( unsigned int i = 0 ; i < 2 ; i++ ) 
+    {
+    membershipFunctions.push_back( MembershipFunctionType::New() );
+    for ( unsigned int j = 0 ; j < sample->GetMeasurementVectorSize(); j++ )
+      {
+      origin[j] = estimatedMeans[index++];
+      }
+    membershipFunctions[i]->SetOrigin( origin );
+    classifier->AddMembershipFunction( membershipFunctions[i].GetPointer() );
+    }
+
+  classifier->Update();
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The following code snippet prints out the measurement vectors and
+  // their class labels in the \code{sample}.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  ClassifierType::OutputType* membershipSample = classifier->GetOutput();
+  ClassifierType::OutputType::ConstIterator iter = membershipSample->Begin();
+
+  while ( iter != membershipSample->End() )
+    {
+    std::cout << "measurement vector = " << iter.GetMeasurementVector()
+              << "class label = " << iter.GetClassLabel()
+              << std::endl;
+    ++iter;
+    }
+  // Software Guide : EndCodeSnippet
+
+  return 0;
+}
+
+
+
+
+
+
+
diff --git a/Examples/Classification/ScalarImageKmeansClassifier.cxx b/Examples/Classification/ScalarImageKmeansClassifier.cxx
new file mode 100755
index 0000000000000000000000000000000000000000..79a1a4d9c01564e48b0039daec6bd1f4b27e37d6
--- /dev/null
+++ b/Examples/Classification/ScalarImageKmeansClassifier.cxx
@@ -0,0 +1,261 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: ScalarImageKmeansClassifier.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/12/10 19:34:53 $
+  Version:   $Revision: 1.12 $
+
+  Copyright (c) Insight Software Consortium. All rights reserved.
+  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+//  Software Guide : BeginCommandLineArgs
+//    INPUTS: {QB_Suburb.png}
+//    OUTPUTS: {QB_Suburb_labelled.png}
+//    0 4 79.5097 138.136 213.846 25.9395
+//    NORMALIZE_EPS_OUTPUT_OF: {QB_Suburb_labelled.png}
+//  Software Guide : EndCommandLineArgs
+
+// Software Guide : BeginLatex
+//
+// This example shows how to use the KMeans model for classifying the pixel of
+// a scalar image.
+//
+// The  \subdoxygen{itk::Statistics}{ScalarImageKmeansImageFilter} is used for taking
+// a scalar image and applying the K-Means algorithm in order to define classes
+// that represent statistical distributions of intensity values in the pixels.
+// The classes are then used in this filter for generating a labeled image where
+// every pixel is assigned to one of the classes.
+//
+// Software Guide : EndLatex 
+
+
+// Software Guide : BeginCodeSnippet
+#include "otbImage.h"
+#include "otbImageFileReader.h"
+#include "otbImageFileWriter.h"
+#include "itkScalarImageKmeansImageFilter.h"
+// Software Guide : EndCodeSnippet
+
+int main( int argc, char * argv [] )
+{
+  if( argc < 5 )
+    {
+    std::cerr << "Usage: " << std::endl;
+    std::cerr << argv[0];
+    std::cerr << " inputScalarImage outputLabeledImage contiguousLabels";
+    std::cerr << " numberOfClasses mean1 mean2... meanN " << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  const char * inputImageFileName = argv[1];
+
+
+
+// Software Guide : BeginLatex
+//
+// First we define the pixel type and dimension of the image that we intend to
+// classify. With this image type we can also declare the
+// \doxygen{otb::ImageFileReader} needed for reading the input image, create one and
+// set its input filename.
+//
+// Software Guide : EndLatex 
+  
+// Software Guide : BeginCodeSnippet
+  typedef signed short       PixelType;
+  const unsigned int          Dimension = 2;
+
+  typedef otb::Image<PixelType, Dimension > ImageType;
+
+  typedef otb::ImageFileReader< ImageType > ReaderType;
+  ReaderType::Pointer reader = ReaderType::New();
+  reader->SetFileName( inputImageFileName );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// With the \code{ImageType} we instantiate the type of the
+// \doxygen{itk::ScalarImageKmeansImageFilter} that will compute the K-Means model
+// and then classify the image pixels. 
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef itk::ScalarImageKmeansImageFilter< ImageType > KMeansFilterType;
+
+  KMeansFilterType::Pointer kmeansFilter = KMeansFilterType::New();
+
+  kmeansFilter->SetInput( reader->GetOutput() );
+
+  const unsigned int numberOfInitialClasses = atoi( argv[4] );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// In general the classification will produce as output an image whose pixel
+// values are integers associated to the labels of the classes. Since typically
+// these integers will be generated in order (0,1,2,...N), the output image
+// will tend to look very dark when displayed with naive viewers. It is
+// therefore convenient to have the option of spreading the label values over
+// the dynamic range of the output image pixel type. When this is done, the
+// dynamic range of the pixels is divided by the number of classes in order to
+// define the increment between labels. For example, an output image of 8 bits
+// will have a dynamic range of [0:255], and when it is used for holding four
+// classes, the non-contiguous labels will be (0,64,128,192). The selection of
+// the mode to use is done with the method \code{SetUseContiguousLabels()}.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  const unsigned int useNonContiguousLabels = atoi( argv[3] );
+
+  kmeansFilter->SetUseNonContiguousLabels( useNonContiguousLabels );
+// Software Guide : EndCodeSnippet
+
+
+  const unsigned int argoffset = 5;
+
+  if( static_cast<unsigned int>(argc) <
+      numberOfInitialClasses + argoffset )
+    {
+    std::cerr << "Error: " << std::endl;
+    std::cerr << numberOfInitialClasses << " classes have been specified ";
+    std::cerr << "but not enough means have been provided in the command ";
+    std::cerr << "line arguments " << std::endl;
+    return EXIT_FAILURE;
+    }
+
+
+// Software Guide : BeginLatex
+//
+// For each one of the classes we must provide a tentative initial value for
+// the mean of the class. Given that this is a scalar image, each one of the
+// means is simply a scalar value. Note however that in a general case of
+// K-Means, the input image would be a vector image and therefore the means
+// will be vectors of the same dimension as the image pixels.
+//
+// Software Guide : EndLatex 
+
+
+// Software Guide : BeginCodeSnippet
+  for( unsigned k=0; k < numberOfInitialClasses; k++ )
+    {
+    const double userProvidedInitialMean = atof( argv[k+argoffset] );
+    kmeansFilter->AddClassWithInitialMean( userProvidedInitialMean );
+    }
+// Software Guide : EndCodeSnippet
+
+
+  const char * outputImageFileName = argv[2];
+
+
+// Software Guide : BeginLatex
+//
+// The \doxygen{itk::ScalarImageKmeansImageFilter} is predefined for producing an 8
+// bits scalar image as output. This output image contains labels associated
+// to each one of the classes in the K-Means algorithm. In the following lines
+// we use the \code{OutputImageType} in order to instantiate the type of a
+// \doxygen{otb::ImageFileWriter}. Then create one, and connect it to the output of
+// the classification filter.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef KMeansFilterType::OutputImageType  OutputImageType;
+
+  typedef otb::ImageFileWriter< OutputImageType > WriterType;
+
+  WriterType::Pointer writer = WriterType::New();
+  
+  writer->SetInput( kmeansFilter->GetOutput() );
+
+  writer->SetFileName( outputImageFileName );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// We are now ready for triggering the execution of the pipeline. This is done
+// by simply invoking the \code{Update()} method in the writer. This call will
+// propagate the update request to the reader and then to the classifier.
+//
+// Software Guide : EndLatex 
+
+
+// Software Guide : BeginCodeSnippet
+  try
+    {
+    writer->Update();
+    }
+  catch( itk::ExceptionObject & excp )
+    {
+    std::cerr << "Problem encountered while writing ";
+    std::cerr << " image file : " << argv[2] << std::endl;
+    std::cerr << excp << std::endl;
+    return EXIT_FAILURE;
+    }
+// Software Guide : EndCodeSnippet
+
+  
+
+
+// Software Guide : BeginLatex
+//
+// At this point the classification is done, the labeled image is saved in a
+// file, and we can take a look at the means that were found as a result of the
+// model estimation performed inside the classifier filter.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  KMeansFilterType::ParametersType estimatedMeans = 
+                                            kmeansFilter->GetFinalMeans();
+
+  const unsigned int numberOfClasses = estimatedMeans.Size();
+
+  for ( unsigned int i = 0 ; i < numberOfClasses ; ++i )
+    {
+    std::cout << "cluster[" << i << "] ";
+    std::cout << "    estimated mean : " << estimatedMeans[i] << std::endl;
+    }
+
+// Software Guide : EndCodeSnippet
+
+//  Software Guide : BeginLatex
+//  
+// \begin{figure} \center
+// \includegraphics[width=0.44\textwidth]{QB_Suburb.eps}
+// \includegraphics[width=0.44\textwidth]{QB_Suburb_labelled.eps}
+// \itkcaption[Output of the KMeans classifier]{Effect of the
+// KMeans classifier. Left: original image. Right: image of classes.}
+// \label{fig:ScalarImageKMeansClassifierOutput}
+// \end{figure}
+//
+//  Figure \ref{fig:ScalarImageKMeansClassifierOutput}
+//  illustrates the effect of this filter with three classes.
+//  The means can be estimated by ScalarImageKmeansModelEstimator.cxx.
+//
+//  Software Guide : EndLatex 
+
+  return EXIT_SUCCESS;
+  
+}
+
+
diff --git a/Examples/Classification/ScalarImageKmeansModelEstimator.cxx b/Examples/Classification/ScalarImageKmeansModelEstimator.cxx
new file mode 100755
index 0000000000000000000000000000000000000000..f0110bd5a6807c6bfd48947ade0ca5654591522a
--- /dev/null
+++ b/Examples/Classification/ScalarImageKmeansModelEstimator.cxx
@@ -0,0 +1,152 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: ScalarImageKmeansModelEstimator.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/11/20 13:27:55 $
+  Version:   $Revision: 1.7 $
+
+  Copyright (c) Insight Software Consortium. All rights reserved.
+  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+//  Software Guide : BeginCommandLineArgs
+//    INPUTS: {QB_Suburb.png}
+//  Software Guide : EndCommandLineArgs
+
+// Software Guide : BeginLatex
+//
+// This example shows how to compute the KMeans model of a Scalar Image.
+//
+// The  \subdoxygen{itk::Statistics}{KdTreeBasedKmeansEstimator} is used for taking
+// a scalar image and applying the K-Means algorithm in order to define classes
+// that represent statistical distributions of intensity values in the pixels.
+// One of the drawbacks of this technique is that the spatial
+// distribution of the pixels is not considered at all. It is common therefore
+// to combine the classification resulting from K-Means with other segmentation
+// techniques that will use the classification as a prior and add spatial
+// information to it in order to produce a better segmentation.
+//
+// Software Guide : EndLatex 
+
+
+#include "itkKdTree.h"
+#include "itkKdTreeBasedKmeansEstimator.h"
+#include "itkWeightedCentroidKdTreeGenerator.h"
+
+#include "itkMinimumDecisionRule.h"
+#include "itkEuclideanDistance.h"
+#include "itkSampleClassifier.h"
+
+#include "itkScalarImageToListAdaptor.h"
+
+#include "otbImage.h"
+#include "otbImageFileReader.h"
+
+int main( int argc, char * argv [] )
+{
+
+  if( argc < 2 )
+    {
+    std::cerr << "Missing command line arguments" << std::endl;
+    std::cerr << "Usage :  " << argv[0] << "  inputImageFileName " << std::endl;
+    return -1;
+    }
+
+
+  typedef unsigned char       PixelType;
+  const unsigned int          Dimension = 2;
+
+  typedef otb::Image<PixelType, Dimension > ImageType;
+
+  typedef otb::ImageFileReader< ImageType > ReaderType;
+
+  ReaderType::Pointer reader = ReaderType::New();
+
+  reader->SetFileName( argv[1] );
+
+  try
+    {
+    reader->Update();
+    }
+  catch( itk::ExceptionObject & excp )
+    {
+    std::cerr << "Problem encountered while reading image file : " << argv[1] << std::endl;
+    std::cerr << excp << std::endl;
+    return -1;
+    }
+
+
+
+  // Software Guide : BeginCodeSnippet
+  
+  // Create a List from the scalar image
+  typedef itk::Statistics::ScalarImageToListAdaptor< ImageType >   AdaptorType;
+
+  AdaptorType::Pointer adaptor = AdaptorType::New();
+
+  adaptor->SetImage(  reader->GetOutput() );
+
+
+
+  // Define the Measurement vector type from the AdaptorType
+  typedef AdaptorType::MeasurementVectorType  MeasurementVectorType;
+
+
+  // Create the K-d tree structure
+  typedef itk::Statistics::WeightedCentroidKdTreeGenerator< 
+                                                      AdaptorType > 
+                                                              TreeGeneratorType;
+
+  TreeGeneratorType::Pointer treeGenerator = TreeGeneratorType::New();
+
+  treeGenerator->SetSample( adaptor );
+  treeGenerator->SetBucketSize( 16 );
+  treeGenerator->Update();
+
+
+
+  typedef TreeGeneratorType::KdTreeType TreeType;
+  typedef itk::Statistics::KdTreeBasedKmeansEstimator<TreeType> EstimatorType;
+
+  EstimatorType::Pointer estimator = EstimatorType::New();
+
+  const unsigned int numberOfClasses = 3; // exactly one initial mean per class is set below
+
+  EstimatorType::ParametersType initialMeans( numberOfClasses );
+  initialMeans[0] = 25.0;   
+  initialMeans[1] = 125.0;  
+  initialMeans[2] = 250.0;  
+
+  estimator->SetParameters( initialMeans );
+  
+  estimator->SetKdTree( treeGenerator->GetOutput() );
+  estimator->SetMaximumIteration( 200 );
+  estimator->SetCentroidPositionChangesThreshold(0.0);
+  estimator->StartOptimization();
+
+  EstimatorType::ParametersType estimatedMeans = estimator->GetParameters();
+
+  for ( unsigned int i = 0 ; i < numberOfClasses ; ++i )
+    {
+    std::cout << "cluster[" << i << "] " << std::endl;
+    std::cout << "    estimated mean : " << estimatedMeans[i] << std::endl;
+    }
+
+// Software Guide : EndCodeSnippet
+
+
+
+  return EXIT_SUCCESS;
+  
+}
+
+
diff --git a/Examples/Classification/ScalarImageMarkovRandomField1.cxx b/Examples/Classification/ScalarImageMarkovRandomField1.cxx
new file mode 100755
index 0000000000000000000000000000000000000000..ad246c39ae3b6320bd3c721fb2e760bb5b75ff9d
--- /dev/null
+++ b/Examples/Classification/ScalarImageMarkovRandomField1.cxx
@@ -0,0 +1,504 @@
+/*=========================================================================
+
+  Program:   Insight Segmentation & Registration Toolkit
+  Module:    $RCSfile: ScalarImageMarkovRandomField1.cxx,v $
+  Language:  C++
+  Date:      $Date: 2005/12/10 18:23:31 $
+  Version:   $Revision: 1.16 $
+
+  Copyright (c) Insight Software Consortium. All rights reserved.
+  See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details.
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+#if defined(_MSC_VER)
+#pragma warning ( disable : 4786 )
+#endif
+
+//
+//  Software Guide : BeginCommandLineArgs
+//    INPUTS: {QB_Suburb.png}, {QB_Suburb_labelled.png}
+//    OUTPUTS: {ScalarImageMarkovRandomField1Output.png}
+//    50 3 4 79.5097 138.136 213.846 25.9395
+//    NORMALIZE_EPS_OUTPUT_OF: {ScalarImageMarkovRandomField1Output.png}
+//  Software Guide : EndCommandLineArgs
+
+// Software Guide : BeginLatex
+//
+// This example shows how to use the Markov Random Field approach for
+// classifying the pixel of a scalar image.
+//
+// The  \subdoxygen{itk::Statistics}{MRFImageFilter} is used for refining an initial
+// classification by introducing the spatial coherence of the labels. The user
+// should provide two images as input. The first image is the one to be
+// classified while the second image is an image of labels representing an
+// initial classification.
+//
+// Software Guide : EndLatex 
+
+
+// Software Guide : BeginLatex
+//
+// The following headers are related to reading input images, writing the
+// output image, and making the necessary conversions between scalar and vector
+// images.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+#include "otbImage.h"
+#include "itkFixedArray.h"
+#include "otbImageFileReader.h"
+#include "otbImageFileWriter.h"
+#include "itkScalarToArrayCastImageFilter.h"
+// Software Guide : EndCodeSnippet
+
+#include "itkRescaleIntensityImageFilter.h"
+
+// Software Guide : BeginLatex
+//
+// The following headers are related to the statistical classification classes.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+#include "itkMRFImageFilter.h"
+#include "itkDistanceToCentroidMembershipFunction.h"
+#include "itkMinimumDecisionRule.h"
+#include "itkImageClassifierBase.h"
+// Software Guide : EndCodeSnippet
+
+int main( int argc, char * argv [] )
+{
+  if( argc < 7 ) // program name plus the six fixed arguments read below
+    {
+    std::cerr << "Usage: " << std::endl;
+    std::cerr << argv[0];
+    std::cerr << " inputScalarImage inputLabeledImage";
+    std::cerr << " outputLabeledImage numberOfIterations";
+    std::cerr << " smoothingFactor numberOfClasses";
+    std::cerr << " mean1 mean2 ... meanN " << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  const char * inputImageFileName      = argv[1];
+  const char * inputLabelImageFileName = argv[2];
+  const char * outputImageFileName     = argv[3];
+
+  const unsigned int numberOfIterations = atoi( argv[4] );
+  const double       smoothingFactor    = atof( argv[5] );
+  const unsigned int numberOfClasses    = atoi( argv[6] );
+
+  const unsigned int numberOfArgumentsBeforeMeans = 7;
+
+  if( static_cast<unsigned int>(argc) <
+      numberOfClasses + numberOfArgumentsBeforeMeans )
+    {
+    std::cerr << "Error: " << std::endl;
+    std::cerr << numberOfClasses << " classes have been specified ";
+    std::cerr << "but not enough means have been provided in the command ";
+    std::cerr << "line arguments " << std::endl;
+    return EXIT_FAILURE;
+    }
+
+
+
+// Software Guide : BeginLatex
+//
+// First we define the pixel type and dimension of the image that we intend to
+// classify. With this image type we can also declare the
+// \doxygen{otb::ImageFileReader} needed for reading the input image, create one and
+// set its input filename. 
+//
+// Software Guide : EndLatex 
+  
+// Software Guide : BeginCodeSnippet
+  typedef unsigned char        PixelType;
+  const unsigned int          Dimension = 2;
+
+  typedef otb::Image<PixelType, Dimension > ImageType;
+
+  typedef otb::ImageFileReader< ImageType > ReaderType;
+  ReaderType::Pointer reader = ReaderType::New();
+  reader->SetFileName( inputImageFileName );
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// As a second step we define the pixel type and dimension of the image of
+// labels that provides the initial classification of the pixels from the first
+// image. This initial labeled image can be the output of a K-Means method like
+// the one illustrated in section \ref{sec:KMeansClassifier}.
+//
+// Software Guide : EndLatex 
+  
+// Software Guide : BeginCodeSnippet
+  typedef unsigned char       LabelPixelType;
+
+  typedef otb::Image<LabelPixelType, Dimension > LabelImageType;
+
+  typedef otb::ImageFileReader< LabelImageType > LabelReaderType;
+  LabelReaderType::Pointer labelReader = LabelReaderType::New();
+  labelReader->SetFileName( inputLabelImageFileName );
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// Since the Markov Random Field algorithm is defined in general for images
+// whose pixels have multiple components, that is, images of vector type, we
+// must adapt our scalar image in order to satisfy the interface expected by
+// the \code{MRFImageFilter}. We do this by using the
+// \doxygen{itk::ScalarToArrayCastImageFilter}. With this filter we will present our
+// scalar image as a vector image whose vector pixels contain a single
+// component. 
+// 
+// Software Guide : EndLatex 
+  
+// Software Guide : BeginCodeSnippet
+  typedef itk::FixedArray<LabelPixelType,1>  ArrayPixelType;
+
+  typedef otb::Image< ArrayPixelType, Dimension > ArrayImageType;
+
+  typedef itk::ScalarToArrayCastImageFilter< 
+                     ImageType, ArrayImageType > ScalarToArrayFilterType;
+
+  ScalarToArrayFilterType::Pointer 
+    scalarToArrayFilter = ScalarToArrayFilterType::New();
+  scalarToArrayFilter->SetInput( reader->GetOutput() );
+// Software Guide : EndCodeSnippet
+
+
+// Software Guide : BeginLatex
+//
+// With the input image type \code{ImageType} and labeled image type
+// \code{LabelImageType} we instantiate the type of the
+// \doxygen{itk::MRFImageFilter} that will apply the Markov Random Field algorithm
+// in order to refine the pixel classification.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef itk::MRFImageFilter< ArrayImageType, LabelImageType > MRFFilterType;
+
+  MRFFilterType::Pointer mrfFilter = MRFFilterType::New();
+
+  mrfFilter->SetInput( scalarToArrayFilter->GetOutput() );
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// We set now some of the parameters for the MRF filter. In particular, the
+// number of classes to be used during the classification, the maximum number
+// of iterations to be run in this filter and the error tolerance that will be
+// used as a criterion for convergence.
+//
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  mrfFilter->SetNumberOfClasses( numberOfClasses );
+  mrfFilter->SetMaximumNumberOfIterations( numberOfIterations );
+  mrfFilter->SetErrorTolerance( 1e-7 );
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// The smoothing factor represents the tradeoff between fidelity to the 
+// observed image and the smoothness of the segmented image. Typical smoothing
+// factors have values between 1~5. This factor will multiply the weights that
+// define the influence of neighbors on the classification of a given pixel.
+// The higher the value, the more uniform will be the regions resulting from
+// the classification refinement.
+// 
+// Software Guide : EndLatex 
+ 
+// Software Guide : BeginCodeSnippet
+  mrfFilter->SetSmoothingFactor( smoothingFactor );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// Given that the MRF filter needs to continually relabel the pixels, it needs
+// access to a set of membership functions that will measure to what degree
+// every pixel belongs to a particular class.  The classification is performed
+// by the \doxygen{itk::ImageClassifierBase} class, that is instantiated using the
+// type of the input vector image and the type of the labeled image.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef itk::ImageClassifierBase< 
+                              ArrayImageType,
+                              LabelImageType >   SupervisedClassifierType;
+
+  SupervisedClassifierType::Pointer classifier = 
+                                         SupervisedClassifierType::New();
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// The classifier needs a decision rule to be set by the user. Note
+// that we must use \code{GetPointer()} in the call of the
+// \code{SetDecisionRule()} method because we are passing a
+// SmartPointer, and smart pointers cannot perform polymorphism, we
+// must then extract the raw pointer that is associated to the smart
+// pointer. This extraction is done with the GetPointer() method.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef itk::MinimumDecisionRule DecisionRuleType;
+
+  DecisionRuleType::Pointer  classifierDecisionRule = DecisionRuleType::New();
+
+  classifier->SetDecisionRule( classifierDecisionRule.GetPointer() );
+// Software Guide : EndCodeSnippet
+
+
+// Software Guide : BeginLatex
+//
+// We now instantiate the membership functions. In this case we use the
+// \subdoxygen{itk::Statistics}{DistanceToCentroidMembershipFunction} class
+// templated over the pixel type of the vector image, which in our example
+// happens to be a vector of dimension 1.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  typedef itk::Statistics::DistanceToCentroidMembershipFunction< 
+                                                    ArrayPixelType > 
+                                                       MembershipFunctionType;
+
+  typedef MembershipFunctionType::Pointer MembershipFunctionPointer;
+
+
+  double meanDistance = 0;
+  vnl_vector<double> centroid(1); 
+  for( unsigned int i=0; i < numberOfClasses; i++ )
+    {
+    MembershipFunctionPointer membershipFunction = 
+                                         MembershipFunctionType::New();
+
+    centroid[0] = atof( argv[i+numberOfArgumentsBeforeMeans] ); 
+
+    membershipFunction->SetCentroid( centroid );
+
+    classifier->AddMembershipFunction( membershipFunction );
+    meanDistance += static_cast< double > (centroid[0]);
+    }
+  meanDistance /= numberOfClasses;
+// Software Guide : EndCodeSnippet
+
+// Software Guide : BeginLatex
+//
+// We set the Smoothing factor. This factor will multiply the weights that
+// define the influence of neighbors on the classification of a given pixel.
+// The higher the value, the more uniform will be the regions resulting from
+// the classification refinement.
+// 
+// Software Guide : EndLatex 
+ 
+// Software Guide : BeginCodeSnippet
+  mrfFilter->SetSmoothingFactor( smoothingFactor );
+// Software Guide : EndCodeSnippet
+
+
+
+// Software Guide : BeginLatex
+//
+// and we set the neighborhood radius that will define the size of the clique
+// to be used in the computation of the neighbors' influence in the
+// classification of any given pixel. Note that despite the fact that we call
+// this a radius, it is actually the half size of a hypercube. That is, the
+// actual region of influence will not be circular but rather an N-Dimensional
+// box. For example, a neighborhood radius of 2 in a 3D image will result in a
+// clique of size 5x5x5 pixels, and a radius of 1 will result in a clique of
+// size 3x3x3 pixels.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  mrfFilter->SetNeighborhoodRadius( 1 );
+// Software Guide : EndCodeSnippet
+
+
+// Software Guide : BeginLatex
+//
+// We should now set the weights used for the neighbors. This is done by
+// passing an array of values that contains the linear sequence of weights for
+// the neighbors. For example, in a neighborhood of size 3x3x3, we should
+// provide a linear array of 9 weight values. The values are packaged in a
+// \code{std::vector} and are supposed to be \code{double}. The following lines
+// illustrate a typical set of values for a 3x3x3 neighborhood. The array is
+// arranged and then passed to the filter by using the method
+// \code{SetMRFNeighborhoodWeight()}.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  std::vector< double > weights;
+  weights.push_back(1.5);
+  weights.push_back(2.0);
+  weights.push_back(1.5);
+  weights.push_back(2.0);
+  weights.push_back(0.0); // This is the central pixel
+  weights.push_back(2.0);
+  weights.push_back(1.5);
+  weights.push_back(2.0);
+  weights.push_back(1.5);
+// Software Guide : EndCodeSnippet
+
+
+// Software Guide : BeginLatex
+// We now scale weights so that the smoothing function and the image fidelity
+// functions have comparable value. This is necessary since the label
+// image and the input image can have different dynamic ranges. The fidelity
+// function is usually computed using a distance function, such as the
+// \doxygen{itk::DistanceToCentroidMembershipFunction} or one of the other 
+// membership functions. They tend to have values in the order of the means
+// specified. 
+// Software Guide : EndLatex 
+    
+// Software Guide : BeginCodeSnippet
+  double totalWeight = 0;
+  for(std::vector< double >::const_iterator wcIt = weights.begin(); 
+      wcIt != weights.end(); ++wcIt )
+    {
+    totalWeight += *wcIt;
+    }
+  for(std::vector< double >::iterator wIt = weights.begin(); 
+      wIt != weights.end(); wIt++ )
+    {
+    *wIt = static_cast< double > ( (*wIt) * meanDistance / (2 * totalWeight));
+    }
+
+  mrfFilter->SetMRFNeighborhoodWeight( weights );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// Finally, the classifier class is connected to the Markov Random Field filter.
+// 
+// Software Guide : EndLatex 
+
+// Software Guide : BeginCodeSnippet
+  mrfFilter->SetClassifier( classifier );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// The output image produced by the \doxygen{itk::MRFImageFilter} has the same pixel
+// type as the labeled input image. In the following lines we use the
+// \code{OutputImageType} in order to instantiate the type of a
+// \doxygen{otb::ImageFileWriter}. Then create one, and connect it to the output of
+// the classification filter after passing it through an intensity rescaler
+// to rescale it to an 8 bit dynamic range
+//
+// Software Guide : EndLatex 
+// Software Guide : BeginCodeSnippet
+  typedef MRFFilterType::OutputImageType  OutputImageType;
+// Software Guide : EndCodeSnippet
+
+  // Rescale outputs to the dynamic range of the display
+  typedef otb::Image< unsigned char, Dimension > RescaledOutputImageType;
+  typedef itk::RescaleIntensityImageFilter< 
+             OutputImageType, RescaledOutputImageType >   RescalerType;
+
+  RescalerType::Pointer intensityRescaler = RescalerType::New();
+  intensityRescaler->SetOutputMinimum(   0 );
+  intensityRescaler->SetOutputMaximum( 255 );
+  intensityRescaler->SetInput( mrfFilter->GetOutput() );  
+
+// Software Guide : BeginCodeSnippet
+  typedef otb::ImageFileWriter< OutputImageType > WriterType;
+
+  WriterType::Pointer writer = WriterType::New();
+  
+  writer->SetInput( intensityRescaler->GetOutput() );
+
+  writer->SetFileName( outputImageFileName );
+// Software Guide : EndCodeSnippet
+
+
+
+
+// Software Guide : BeginLatex
+//
+// We are now ready for triggering the execution of the pipeline. This is done
+// by simply invoking the \code{Update()} method in the writer. This call will
+// propagate the update request to the reader and then to the MRF filter.
+//
+// Software Guide : EndLatex 
+
+
+// Software Guide : BeginCodeSnippet
+  try
+    {
+    writer->Update();
+    }
+  catch( itk::ExceptionObject & excp )
+    {
+    std::cerr << "Problem encountered while writing ";
+    std::cerr << " image file : " << outputImageFileName << std::endl;
+    std::cerr << excp << std::endl;
+    return EXIT_FAILURE;
+    }
+// Software Guide : EndCodeSnippet
+
+  std::cout << "Number of Iterations : ";
+  std::cout << mrfFilter->GetNumberOfIterations() << std::endl;
+  std::cout << "Stop condition: " << std::endl;
+  std::cout << "  (1) Maximum number of iterations " << std::endl;
+  std::cout << "  (2) Error tolerance:  "  << std::endl;
+  std::cout << mrfFilter->GetStopCondition() << std::endl;
+
+  //  Software Guide : BeginLatex
+  //  
+  // \begin{figure} \center
+  // \includegraphics[width=0.44\textwidth]{ScalarImageMarkovRandomField1Output.eps}
+  // \itkcaption[Output of the ScalarImageMarkovRandomField]{Effect of the
+  // MRF filter.}
+  // \label{fig:ScalarImageMarkovRandomFieldInputOutput}
+  // \end{figure}
+  //
+  //  Figure \ref{fig:ScalarImageMarkovRandomFieldInputOutput}
+  //  illustrates the effect of this filter with four classes.
+  //  In this example the filter was run with a smoothing factor of 3.
+  //  The labeled image was produced by ScalarImageKmeansClassifier.cxx
+  //  and the means were estimated by
+  //  ScalarImageKmeansModelEstimator.cxx described in section
+  //  \ref{sec:KMeansClassifier}.
+  //  The obtained result can be compared with the one of
+  //  figure~\ref{fig:ScalarImageKMeansClassifierOutput} to see the
+  //  interest of using the MRF approach in order to ensure the
+  //  regularization of the classified image.
+  //
+  //  Software Guide : EndLatex 
+
+
+  return EXIT_SUCCESS;
+  
+}
+
+
diff --git a/Examples/Data/ERSAfter.png b/Examples/Data/ERSAfter.png
new file mode 100644
index 0000000000000000000000000000000000000000..dfee1c9ebfb31a1471c5beb5483264a118f3e403
Binary files /dev/null and b/Examples/Data/ERSAfter.png differ
diff --git a/Examples/Data/ERSBefore.png b/Examples/Data/ERSBefore.png
new file mode 100644
index 0000000000000000000000000000000000000000..66d1132a2252435bf667a99abd379da17dbd64c8
Binary files /dev/null and b/Examples/Data/ERSBefore.png differ
diff --git a/Examples/Data/ROI_mask_multi.png b/Examples/Data/ROI_mask_multi.png
index 3a3de297566e508c8eb9162bd71cc70724c242e1..55703ba6936fd9f93ec7210c2adc27995e412255 100644
Binary files a/Examples/Data/ROI_mask_multi.png and b/Examples/Data/ROI_mask_multi.png differ
diff --git a/Examples/Learning/SVMImageClassificationExample.cxx b/Examples/Learning/SVMImageClassificationExample.cxx
index 835976983434226e8b8d5f051a54fab8e79b7e89..8e06630efd6c7f60424155d13784343a38316c63 100644
--- a/Examples/Learning/SVMImageClassificationExample.cxx
+++ b/Examples/Learning/SVMImageClassificationExample.cxx
@@ -388,8 +388,8 @@ int main(int argc, char* argv[] )
 // Figure \ref{fig:SVMCLASS} shows the result of the SVM classification.
 // \begin{figure}
 // \center
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1.eps}
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1_SVN_CLASS.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1_SVN_CLASS.eps}
 // \itkcaption[SVM Image Classification]{Result of the SVM
 // classification . Left: RGB image. Right: image of classes.} 
 // \label{fig:SVMCLASS}
diff --git a/Examples/Learning/SVMImageEstimatorClassificationMultiExample.cxx b/Examples/Learning/SVMImageEstimatorClassificationMultiExample.cxx
index 380687ffde2393638e10a8425de165e29fa18602..bc286eeb37ba8092a34470ea936aa152d7d30434 100644
--- a/Examples/Learning/SVMImageEstimatorClassificationMultiExample.cxx
+++ b/Examples/Learning/SVMImageEstimatorClassificationMultiExample.cxx
@@ -42,8 +42,8 @@
 // and the associated ground truth, which is composed of 4 classes. 
 // \begin{figure}
 // \center
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1.eps}
-// \includegraphics[width=0.35\textwidth]{ROI_mask_multi.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_mask_multi.eps}
 // \itkcaption[SVM Image Model Estimation]{Images used for the
 // estimation of the SVM model. Left: RGB image. Right: image of labels.} 
 // \label{fig:SVMROISMULTI}
@@ -442,8 +442,8 @@ int main( int argc, char* argv[] )
 // Figure \ref{fig:SVMCLASSMULTI} shows the result of the SVM classification.
 // \begin{figure}
 // \center
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1.eps}
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1_SVN_CLASS_MULTI.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1_SVN_CLASS_MULTI.eps}
 // \itkcaption[SVM Image Classification]{Result of the SVM
 // classification . Left: RGB image. Right: image of classes.} 
 // \label{fig:SVMCLASSMULTI}
diff --git a/Examples/Learning/SVMImageModelEstimatorExample.cxx b/Examples/Learning/SVMImageModelEstimatorExample.cxx
index 73816645789df6e89b62d095cd8954e3328857f7..2babd53e6cb1794878b48355e74712d367d06710 100644
--- a/Examples/Learning/SVMImageModelEstimatorExample.cxx
+++ b/Examples/Learning/SVMImageModelEstimatorExample.cxx
@@ -40,8 +40,8 @@
 // figure~\ref{fig:SVMROIS}. 
 // \begin{figure}
 // \center
-// \includegraphics[width=0.35\textwidth]{ROI_QB_MUL_1.eps}
-// \includegraphics[width=0.35\textwidth]{ROI_mask.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_QB_MUL_1.eps}
+// \includegraphics[width=0.45\textwidth]{ROI_mask.eps}
 // \itkcaption[SVM Image Model Estimation]{Images used for the
 // estimation of the SVM model. Left: RGB image. Right: image of labels.} 
 // \label{fig:SVMROIS}
diff --git a/Examples/Visu/CMakeLists.txt b/Examples/Visu/CMakeLists.txt
index 2a8bbeb96a6985a95547e9ccc3144c4b894ba6aa..14757ad86db84e9fabfd6c5c19966efbb902c647 100644
--- a/Examples/Visu/CMakeLists.txt
+++ b/Examples/Visu/CMakeLists.txt
@@ -1,7 +1,10 @@
 PROJECT(FeatureExtraction)
 INCLUDE_REGULAR_EXPRESSION("^.*$")
 
-ADD_EXECUTABLE(GreyVisuExample GreyVisuExample.cxx )
-TARGET_LINK_LIBRARIES(GreyVisuExample OTBVisu OTBCommon OTBIO ITKCommon ITKIO)
+#ADD_EXECUTABLE(GreyVisuExample GreyVisuExample.cxx )
+#TARGET_LINK_LIBRARIES(GreyVisuExample OTBVisu OTBIO OTBCommon cai ITKIO ITKCommon)
+
+ADD_EXECUTABLE(VisuExample1 VisuExample1.cxx )
+TARGET_LINK_LIBRARIES(VisuExample1 OTBVisu OTBIO OTBCommon cai ITKIO ITKCommon)
 
 
diff --git a/Examples/Visu/GreyVisuExample.cxx b/Examples/Visu/GreyVisuExample.cxx
index 815f0ab3d4600e510e9babacf3193b5fb5262857..503d0733e22224b502b36e60bfc51547a81c5aee 100644
--- a/Examples/Visu/GreyVisuExample.cxx
+++ b/Examples/Visu/GreyVisuExample.cxx
@@ -21,6 +21,7 @@
 
 #include "otbImageFileReader.h"
 #include "otbImage.h"
+#include "otbVectorImage.h"
 
   // Software Guide : BeginLatex
   //
@@ -58,9 +59,10 @@ int main( int argc, char ** argv )
   // Software Guide : EndLatex
 
   // Siftware Guide : BeginCodeSnippet
-  typedef otb::Image< unsigned char, 2 > ImageType;
+  typedef int PixelType;
+  typedef otb::VectorImage< PixelType, 2 > ImageType;
   typedef otb::ImageFileReader< ImageType > ReaderType;    
-  typedef otb::ImageViewer< ImageType::PixelType>    ViewerType;
+  typedef otb::ImageViewer< PixelType >    ViewerType;
   // Software Guide : EndCodeSnippet
 
   // Software Guide : BeginLatex
diff --git a/Examples/Visu/VisuExample1.cxx b/Examples/Visu/VisuExample1.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..4977e013a44e6260e13456055e4161e6f87c878d
--- /dev/null
+++ b/Examples/Visu/VisuExample1.cxx
@@ -0,0 +1,147 @@
+/*=========================================================================
+
+  Program:   ORFEO Toolbox
+  Language:  C++
+  Date:      $Date$
+  Version:   $Revision$
+
+
+  Copyright (c) Centre National d'Etudes Spatiales. All rights reserved.
+  See OTBCopyright.txt for details.
+
+
+     This software is distributed WITHOUT ANY WARRANTY; without even 
+     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 
+     PURPOSE.  See the above copyright notices for more information.
+
+=========================================================================*/
+
+#include <stdlib.h>
+#include "itkExceptionObject.h"
+
+#include "otbImageFileReader.h"
+#include "otbImage.h"
+#include "otbVectorImage.h"
+
+  // Software Guide : BeginLatex
+  //
+  // This example shows the use of the \doxygen{otb::ImageViewer}
+  // class for image visualization. As usual, we start by
+  // including the header file for the class.
+  //
+  // Software Guide : EndLatex
+
+// Software Guide : BeginCodeSnippet
+#include "otbImageViewer.h"
+// Software Guide : EndCodeSnippet
+
+int main( int argc, char ** argv )
+{
+
+
+
+  if( argc != 2)
+    {
+    std::cout <<" Usage : InputImage"<<std::endl;
+    }
+        
+    
+  const char * inputFilename  = argv[1];
+
+  // Software Guide : BeginLatex
+  //
+  // We will build a very simple pipeline where a reader gets an image
+  // from a file and gives it to the viewer. We define the types for
+  // the pixel, the image and the reader. The viewer class is templated
+  // over the scalar component of the pixel type.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  typedef int PixelType;
+  typedef otb::VectorImage< PixelType, 2 > ImageType;
+  typedef otb::ImageFileReader< ImageType > ReaderType;    
+  typedef otb::ImageViewer< PixelType >    ViewerType;
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // We create the objects.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet
+  ViewerType::Pointer  lViewer = ViewerType::New();
+  ReaderType::Pointer       lReader = ReaderType::New();
+  lReader->SetFileName(inputFilename);
+  lReader->Update();
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // We can choose a label for the windows created by the viewer.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet    
+  lViewer->SetLabel( "My Image" );
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // We can now plug the pipeline and trigger the visualization by
+  // using the \code{Show} method.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet    
+
+  lViewer->SetImage( lReader->GetOutput() );  
+
+  lViewer->Show();
+
+  // Software Guide : EndCodeSnippet
+
+  // Software Guide : BeginLatex
+  //
+  // The last step consists in starting the GUI event loop by calling
+  // the appropriate FLTK method.
+  //
+  // Software Guide : EndLatex
+
+  // Software Guide : BeginCodeSnippet    
+
+  Fl::run();
+
+  // Software Guide : EndCodeSnippet
+  // Software Guide : BeginLatex
+  //
+  // \begin{figure}
+  // \center
+  // \includegraphics[width=.8\textwidth]{VisuGrey.eps}
+  // \itkcaption[Image visualization.]{Example of image visualization.}
+  // \label{fig:Visu1}
+  // \end{figure}
+  // The \doxygen{otb::ImageViewer} class creates 3 windows (see
+  // figure \ref{fig:Visu1}) for an improved visualization of large
+  // images. This procedure is inspired from the navigation window of
+  // the Gimp and other image visualization tools. The navigation
+  // window is called here \emph{scroll} window and it shows the
+  // complete image but subsampled to a lower resolution. The principal
+  // window shows the region marked by a red rectangle in the scroll
+  // window using the real resolution of the image. Finally, a zoom
+  // window displays the region inside the red rectangle shown in the
+  // principal window. A mouse click on a pixel of the scroll (respectively, the
+  // principal window) updates the rectangle position and, therefore,
+  // the region viewed in the principal (respectively, the zoom)
+  // window. The zoom rate can be modified by using the + and - keys.
+  //
+  // Software Guide : EndLatex
+
+    
+
+
+  return EXIT_SUCCESS;
+}
+
+