diff --git a/CMake/FindShark.cmake b/CMake/FindShark.cmake
index 59bef138f730718a535ef984deace363cf4f8a47..523f1ee7dcc4498927118bd6427ca906ffac9f11 100644
--- a/CMake/FindShark.cmake
+++ b/CMake/FindShark.cmake
@@ -97,25 +97,43 @@ if(SHARK_CONFIG_FILE)
   "${SHARK_VERSION_MAJOR}.${SHARK_VERSION_MINOR}.${SHARK_VERSION_PATCH}")
 endif()
 
-set(SHARK_USE_OPENMP_matched)
-#define SHARK_USE_OPENMP
+# Check if Shark was built with OpenMP, CBLAS, DYNLIB, ...
 file(STRINGS "${SHARK_INCLUDE_DIR}/shark/Core/Shark.h" SHARK_H_CONTENTS)
-string(REGEX MATCH
-  "#define.SHARK_USE_OPENMP"
-  SHARK_USE_OPENMP_matched "${SHARK_H_CONTENTS}")
 
-if(SHARK_USE_OPENMP_matched)
-  if(NOT OTB_USE_OPENMP)
-    message(WARNING "Shark library is built with OpenMP and you have OTB_USE_OPENMP set to OFF.")
-  endif()
+if(SHARK_H_CONTENTS MATCHES "#define.SHARK_USE_OPENMP")
+  set(SHARK_USE_OPENMP 1)
+else()
+  set(SHARK_USE_OPENMP 0)
+endif()
+
+if(SHARK_H_CONTENTS MATCHES "#define.SHARK_USE_CBLAS")
+  set(SHARK_USE_CBLAS 1)
+else()
+  set(SHARK_USE_CBLAS 0)
+endif()
+
+if(SHARK_H_CONTENTS MATCHES "#define.SHARK_USE_DYNLIB")
+  set(SHARK_USE_DYNLIB 1)
+else()
+  set(SHARK_USE_DYNLIB 0)
+endif()
+
+if(SHARK_USE_CBLAS AND SHARK_USE_DYNLIB)
+  set(REQUIRED_CBLAS_LIB CBLAS_LIBRARY)
+  find_library(CBLAS_LIBRARY NAMES cblas)
+else()
+  set(REQUIRED_CBLAS_LIB)
 endif()
 
 INCLUDE(${CMAKE_ROOT}/Modules/FindPackageHandleStandardArgs.cmake)
 FIND_PACKAGE_HANDLE_STANDARD_ARGS(Shark
-  REQUIRED_VARS SHARK_LIBRARY SHARK_INCLUDE_DIR
+  REQUIRED_VARS SHARK_LIBRARY SHARK_INCLUDE_DIR ${REQUIRED_CBLAS_LIB}
   VERSION_VAR SHARK_VERSION_STRING)
 
 if(SHARK_FOUND)
   set(SHARK_INCLUDE_DIRS ${SHARK_INCLUDE_DIR} ${Boost_INCLUDE_DIR} )
   set(SHARK_LIBRARIES ${SHARK_LIBRARY} ${Boost_LIBRARIES} )
+  if(REQUIRED_CBLAS_LIB)
+    set(SHARK_LIBRARIES ${SHARK_LIBRARIES} ${CBLAS_LIBRARY})
+  endif()
 endif()
diff --git a/Modules/Applications/AppDimensionalityReduction/app/CMakeLists.txt b/Modules/Applications/AppDimensionalityReduction/app/CMakeLists.txt
index 63c4a2105666056a75d4f7b02a8a1dbac75a2e50..6e57a26cb83cf691166b6f1e8e98dff98a21c578 100644
--- a/Modules/Applications/AppDimensionalityReduction/app/CMakeLists.txt
+++ b/Modules/Applications/AppDimensionalityReduction/app/CMakeLists.txt
@@ -22,3 +22,22 @@ otb_create_application(
   NAME           DimensionalityReduction
   SOURCES        otbDimensionalityReduction.cxx
   LINK_LIBRARIES ${${otb-module}_LIBRARIES})
+
+OTB_CREATE_APPLICATION(
+  NAME TrainDimensionalityReduction
+  SOURCES otbTrainDimensionalityReduction.cxx
+  LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${OTBCommon_LIBRARIES} ${OTBITK_LIBRARIES} ${OTBBoost_LIBRARIES} ${OTBShark_LIBRARIES} 
+  )
+
+OTB_CREATE_APPLICATION(
+  NAME ImageDimensionalityReduction
+  SOURCES otbImageDimensionalityReduction.cxx
+  LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${OTBCommon_LIBRARIES} ${OTBITK_LIBRARIES} ${OTBBoost_LIBRARIES} ${OTBShark_LIBRARIES} 
+  )
+
+OTB_CREATE_APPLICATION(
+  NAME VectorDimensionalityReduction
+  SOURCES otbVectorDimensionalityReduction.cxx
+  LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${OTBCommon_LIBRARIES} ${OTBITK_LIBRARIES} ${OTBBoost_LIBRARIES} ${OTBShark_LIBRARIES} 
+  )
+
diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..c221302c033c889e1ddb601662ab840f01257a22
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "otbWrapperApplication.h"
+#include "otbWrapperApplicationFactory.h"
+
+#include "itkUnaryFunctorImageFilter.h"
+#include "otbChangeLabelImageFilter.h"
+#include "otbStandardWriterWatcher.h"
+#include "otbStatisticsXMLFileReader.h"
+#include "otbShiftScaleVectorImageFilter.h"
+#include "otbImageDimensionalityReductionFilter.h"
+#include "otbMultiToMonoChannelExtractROI.h"
+#include "otbImageToVectorImageCastFilter.h"
+#include "otbDimensionalityReductionModelFactory.h"
+
+namespace otb
+{
+namespace Functor
+{
+/**
+ * simple affine function : y = ax+b
+ */
+template<class TInput, class TOutput>
+class AffineFunctor
+{
+public:
+  typedef double InternalType;
+  
+  // constructor
+  AffineFunctor() : m_A(1.0),m_B(0.0) {}
+  
+  // destructor
+  virtual ~AffineFunctor() {}
+  
+  void SetA(InternalType a)
+    {
+    m_A = a;
+    }
+  
+  void SetB(InternalType b)
+    {
+    m_B = b;
+    }
+  
+  inline TOutput operator()(const TInput & x) const
+    {
+    return static_cast<TOutput>( static_cast<InternalType>(x)*m_A + m_B);
+    }
+private:
+  InternalType m_A;
+  InternalType m_B;
+};
+  
+} // end of namespace Functor
+
+namespace Wrapper
+{
+/**
+ * \class ImageDimensionalityReduction
+ *
+ * Apply a dimensionality reduction model to an image
+ */
+class ImageDimensionalityReduction : public Application
+{
+public:
+  /** Standard class typedefs. */
+  typedef ImageDimensionalityReduction             Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Standard macro */
+  itkNewMacro(Self);
+
+  itkTypeMacro(ImageDimensionalityReduction, otb::Application);
+
+  /** Filters typedef */
+  typedef UInt8ImageType                                  MaskImageType;
+  typedef itk::VariableLengthVector<
+    FloatVectorImageType::InternalPixelType>              MeasurementType;
+  typedef otb::StatisticsXMLFileReader<MeasurementType>   StatisticsReader;
+  typedef otb::ShiftScaleVectorImageFilter<
+    FloatVectorImageType, FloatVectorImageType>           RescalerType;
+  typedef itk::UnaryFunctorImageFilter<
+      FloatImageType,
+      FloatImageType,
+      otb::Functor::AffineFunctor<float,float> >          OutputRescalerType;
+  typedef otb::ImageDimensionalityReductionFilter<
+    FloatVectorImageType,
+    FloatVectorImageType,
+    MaskImageType>                                        DimensionalityReductionFilterType;
+  typedef DimensionalityReductionFilterType::Pointer      DimensionalityReductionFilterPointerType;
+  typedef DimensionalityReductionFilterType::ModelType    ModelType;
+  typedef ModelType::Pointer                              ModelPointerType;
+  typedef DimensionalityReductionFilterType::ValueType    ValueType;
+  typedef DimensionalityReductionFilterType::LabelType    LabelType;
+  typedef otb::DimensionalityReductionModelFactory<
+    ValueType, LabelType>                                 DimensionalityReductionModelFactoryType;
+
+protected:
+
+  ~ImageDimensionalityReduction() ITK_OVERRIDE
+  {
+    DimensionalityReductionModelFactoryType::CleanFactories();
+  }
+
+private:
+  void DoInit() ITK_OVERRIDE
+  {
+    SetName("ImageDimensionalityReduction");
+    SetDescription("Performs dimensionality reduction of the input image "
+      "according to a dimensionality reduction model file.");
+
+    // Documentation
+    SetDocName("Image Dimensionality Reduction");
+    SetDocLongDescription("This application reduces the dimension of an input"
+                          " image, based on a machine learning model file produced by"
+                          " the TrainDimensionalityReduction application. Pixels of the "
+                          "output image will contain the reduced values from "
+                          "the model. The input pixels"
+                          " can be optionally centered and reduced according "
+                          "to the statistics file produced by the "
+                          "ComputeImagesStatistics application. ");
+
+    SetDocLimitations("The input image must contain the feature bands used for"
+                      " the model training. "
+                      "If a statistics file was used during training by the "
+                      "Training application, it is mandatory to use the same "
+                      "statistics file for reduction.");
+    SetDocAuthors("OTB-Team");
+    SetDocSeeAlso("TrainDimensionalityReduction, ComputeImagesStatistics");
+
+    AddDocTag(Tags::Learning);
+
+    AddParameter(ParameterType_InputImage, "in",  "Input Image");
+    SetParameterDescription( "in", "The input image to predict.");
+
+    AddParameter(ParameterType_InputImage,  "mask",   "Input Mask");
+    SetParameterDescription( "mask", "The mask allows restricting "
+      "classification of the input image to the area where mask pixel values "
+      "are greater than 0.");
+    MandatoryOff("mask");
+
+    AddParameter(ParameterType_InputFilename, "model", "Model file");
+    SetParameterDescription("model", "A dimensionality reduction model file (produced by "
+                            "TrainDimensionalityReduction application).");
+
+    AddParameter(ParameterType_InputFilename, "imstat", "Statistics file");
+    SetParameterDescription("imstat", "A XML file containing mean and standard"
+      " deviation to center and reduce samples before prediction "
+      "(produced by ComputeImagesStatistics application). If this file contains "
+                            "one more band than the sample size, the last stat of the last band will be "
+                            "applied to expand the output predicted value");
+    MandatoryOff("imstat");
+
+    AddParameter(ParameterType_OutputImage, "out",  "Output Image");
+    SetParameterDescription( "out", "Output image containing reduced values");
+
+    AddRAMParameter();
+
+   // Doc example parameter settings
+    SetDocExampleParameterValue("in", "QB_1_ortho.tif");
+    SetDocExampleParameterValue("imstat", "EstimateImageStatisticsQB1.xml");
+    SetDocExampleParameterValue("model", "clsvmModelQB1.model");
+    SetDocExampleParameterValue("out", "ReducedImageQB1.tif");
+  }
+
+  void DoUpdateParameters() ITK_OVERRIDE
+  {
+    // Nothing to do here : all parameters are independent
+  }
+
+  void DoExecute() ITK_OVERRIDE
+  {
+    // Load input image
+    FloatVectorImageType::Pointer inImage = GetParameterImage("in");
+    inImage->UpdateOutputInformation();
+    unsigned int nbFeatures = inImage->GetNumberOfComponentsPerPixel();
+
+    // Load DR model using a factory
+    otbAppLogINFO("Loading model");
+    m_Model = DimensionalityReductionModelFactoryType::CreateDimensionalityReductionModel(
+      GetParameterString("model"),
+      DimensionalityReductionModelFactoryType::ReadMode);
+
+    if (m_Model.IsNull())
+      {
+      otbAppLogFATAL(<< "Error when loading model " << GetParameterString("model")
+        << " : unsupported model type");
+      }
+
+    m_Model->Load(GetParameterString("model"));
+    otbAppLogINFO("Model loaded, dimension = "<< m_Model->GetDimension());
+
+    // Classify
+    m_ClassificationFilter = DimensionalityReductionFilterType::New();
+    m_ClassificationFilter->SetModel(m_Model);
+    
+    FloatVectorImageType::Pointer outputImage = m_ClassificationFilter->GetOutput();
+
+    // Normalize input image if asked
+    if( IsParameterEnabled("imstat") && HasValue("imstat") )
+      {
+      otbAppLogINFO("Input image normalization activated.");
+      // Normalize input image (optional)
+      StatisticsReader::Pointer  statisticsReader = StatisticsReader::New();
+      MeasurementType  meanMeasurementVector;
+      MeasurementType  stddevMeasurementVector;
+      m_Rescaler = RescalerType::New();
+      
+      // Load input image statistics
+      statisticsReader->SetFileName(GetParameterString("imstat"));
+      meanMeasurementVector   = statisticsReader->GetStatisticVectorByName("mean");
+      stddevMeasurementVector = statisticsReader->GetStatisticVectorByName("stddev");
+      otbAppLogINFO( "mean used: " << meanMeasurementVector );
+      otbAppLogINFO( "standard deviation used: " << stddevMeasurementVector );
+      if (meanMeasurementVector.Size() != nbFeatures)
+        {
+        otbAppLogFATAL("Wrong number of components in statistics file : "<<meanMeasurementVector.Size());
+        }
+        
+      // Rescale vector image
+      m_Rescaler->SetScale(stddevMeasurementVector);
+      m_Rescaler->SetShift(meanMeasurementVector);
+      m_Rescaler->SetInput(inImage);
+
+      m_ClassificationFilter->SetInput(m_Rescaler->GetOutput());
+      }
+    else
+      {
+      otbAppLogINFO("Input image normalization deactivated.");
+      m_ClassificationFilter->SetInput(inImage);
+      }
+
+    if(IsParameterEnabled("mask"))
+      {
+      otbAppLogINFO("Using input mask");
+      // Load mask image and cast into LabeledImageType
+      MaskImageType::Pointer inMask = GetParameterUInt8Image("mask");
+
+      m_ClassificationFilter->SetInputMask(inMask);
+      }
+
+    SetParameterOutputImage<FloatVectorImageType>("out", outputImage);
+  }
+
+  DimensionalityReductionFilterType::Pointer m_ClassificationFilter;
+  ModelPointerType m_Model;
+  RescalerType::Pointer m_Rescaler;
+  OutputRescalerType::Pointer m_OutRescaler;
+};
+
+} // end of namespace Wrapper
+} // end of namespace otb
+
+OTB_APPLICATION_EXPORT(otb::Wrapper::ImageDimensionalityReduction)
diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbTrainDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbTrainDimensionalityReduction.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..1cbc567c562948f356f55b84f0a4bf42a7f7b0da
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbTrainDimensionalityReduction.cxx
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "otbWrapperApplication.h"
+#include "otbWrapperApplicationFactory.h"
+
+#include "otbOGRDataSourceWrapper.h"
+#include "otbOGRFeatureWrapper.h"
+
+#include "itkVariableLengthVector.h"
+
+#include "otbShiftScaleSampleListFilter.h"
+#include "otbStatisticsXMLFileReader.h"
+
+#include <fstream> // write the model file
+
+#include "otbDimensionalityReductionModelFactory.h"
+#include "otbTrainDimensionalityReductionApplicationBase.h"
+
+namespace otb
+{
+namespace Wrapper
+{
+
+/**
+ * \class TrainDimensionalityReduction
+ *
+ * Training of a dimensionality reduction model
+ */
+class TrainDimensionalityReduction : public TrainDimensionalityReductionApplicationBase<float,float>
+{
+public:
+  typedef TrainDimensionalityReduction Self;
+  typedef TrainDimensionalityReductionApplicationBase<float, float> Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+  
+  itkNewMacro(Self);
+  itkTypeMacro(TrainDimensionalityReduction, otb::Application);
+
+  typedef Superclass::SampleType              SampleType;
+  typedef Superclass::ListSampleType          ListSampleType;
+  typedef Superclass::SampleImageType         SampleImageType;
+
+  typedef float ValueType;
+  typedef itk::VariableLengthVector<ValueType> MeasurementType;
+
+  typedef otb::StatisticsXMLFileReader<SampleType> StatisticsReader;
+
+  typedef otb::Statistics::ShiftScaleSampleListFilter<ListSampleType, ListSampleType> ShiftScaleFilterType;
+
+  typedef otb::DimensionalityReductionModelFactory<ValueType, ValueType>  ModelFactoryType;
+
+private:
+  void DoInit()
+  {
+    SetName("TrainDimensionalityReduction");
+    SetDescription("Train a dimensionality reduction model");
+
+    SetDocName("Train Dimensionality Reduction");
+    SetDocLongDescription("Trainer for dimensionality reduction algorithms "
+      "(autoencoders, PCA, SOM). All input samples are used to compute the "
+      "model, like other machine learning models.\n"
+      "The model can be used in the ImageDimensionalityReduction and "
+      "VectorDimensionalityReduction applications.");
+
+    SetDocLimitations("None");
+    SetDocAuthors("OTB-Team");
+    SetDocSeeAlso("ImageDimensionalityReduction, VectorDimensionalityReduction");
+
+    AddParameter(ParameterType_Group, "io", "Input and output data");
+    SetParameterDescription("io", "This group of parameters allows setting input and output data.");
+
+    AddParameter(ParameterType_InputVectorData, "io.vd", "Input Vector Data");
+    SetParameterDescription("io.vd", "Input geometries used for training (note "
+      ": all geometries from the layer will be used)");
+
+    AddParameter(ParameterType_OutputFilename, "io.out", "Output model");
+    SetParameterDescription("io.out", "Output file containing the estimated model (.txt format).");
+
+    AddParameter(ParameterType_InputFilename, "io.stats", "Input XML image statistics file");
+    MandatoryOff("io.stats");
+    SetParameterDescription("io.stats", "XML file containing mean and variance of each feature.");
+
+    AddParameter(ParameterType_StringList, "feat", "Field names to be used for training."); //
+    SetParameterDescription("feat","List of field names in the input vector data"
+      " used as features for training."); //
+
+    Superclass::DoInit();
+
+    AddRAMParameter();
+
+    // Doc example parameter settings
+    SetDocExampleParameterValue("io.vd", "cuprite_samples.sqlite");
+    SetDocExampleParameterValue("io.out", "mode.ae");
+    SetDocExampleParameterValue("algorithm", "pca");
+    SetDocExampleParameterValue("algorithm.pca.dim", "8");
+    SetDocExampleParameterValue("feat","value_0 value_1 value_2 value_3 value_4"
+      " value_5 value_6 value_7 value_8 value_9");
+  }
+
+  void DoUpdateParameters()
+  {
+  }
+
+  void DoExecute()
+  {
+    std::string shapefile = GetParameterString("io.vd");
+
+    otb::ogr::DataSource::Pointer source =
+      otb::ogr::DataSource::New(shapefile, otb::ogr::DataSource::Modes::Read);
+    otb::ogr::Layer layer = source->GetLayer(0);
+    ListSampleType::Pointer input = ListSampleType::New();
+    const int nbFeatures = GetParameterStringList("feat").size();
+
+    input->SetMeasurementVectorSize(nbFeatures);
+    otb::ogr::Layer::const_iterator it = layer.cbegin();
+    otb::ogr::Layer::const_iterator itEnd = layer.cend();
+    for( ; it!=itEnd ; ++it)
+      {
+      MeasurementType mv;
+      mv.SetSize(nbFeatures);
+      for(int idx=0; idx < nbFeatures; ++idx)
+        {
+        mv[idx] = (*it)[GetParameterStringList("feat")[idx]].GetValue<double>();
+        }
+      input->PushBack(mv);
+      }
+
+    MeasurementType meanMeasurementVector;
+    MeasurementType stddevMeasurementVector;
+
+    if (HasValue("io.stats") && IsParameterEnabled("io.stats"))
+      {
+      StatisticsReader::Pointer statisticsReader = StatisticsReader::New();
+      std::string XMLfile = GetParameterString("io.stats");
+      statisticsReader->SetFileName(XMLfile);
+      meanMeasurementVector = statisticsReader->GetStatisticVectorByName("mean");
+      stddevMeasurementVector = statisticsReader->GetStatisticVectorByName("stddev");
+      }
+    else
+      {
+      meanMeasurementVector.SetSize(nbFeatures);
+      meanMeasurementVector.Fill(0.);
+      stddevMeasurementVector.SetSize(nbFeatures);
+      stddevMeasurementVector.Fill(1.);
+      }
+
+    ShiftScaleFilterType::Pointer trainingShiftScaleFilter = ShiftScaleFilterType::New();
+    trainingShiftScaleFilter->SetInput(input);
+    trainingShiftScaleFilter->SetShifts(meanMeasurementVector);
+    trainingShiftScaleFilter->SetScales(stddevMeasurementVector);
+    trainingShiftScaleFilter->Update();
+
+    ListSampleType::Pointer trainingListSample= trainingShiftScaleFilter->GetOutput();
+
+    this->Train(trainingListSample,GetParameterString("io.out"));
+  }
+
+};
+
+} // end of namespace Wrapper
+} // end of namespace otb
+
+OTB_APPLICATION_EXPORT(otb::Wrapper::TrainDimensionalityReduction)
diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..132e13875cbffdc0d6152601173c7a24e89afd9a
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbVectorDimensionalityReduction.cxx
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "otbWrapperApplication.h"
+#include "otbWrapperApplicationFactory.h"
+#include "otbOGRDataSourceWrapper.h"
+#include "otbOGRFeatureWrapper.h"
+#include "itkVariableLengthVector.h"
+#include "otbStatisticsXMLFileReader.h"
+#include "itkListSample.h"
+#include "otbShiftScaleSampleListFilter.h"
+#include "otbDimensionalityReductionModelFactory.h"
+#include <time.h>
+
+namespace otb
+{
+namespace Wrapper
+{
+  
+/** Utility function to negate std::isalnum */
+bool IsNotAlphaNum(char c)
+{
+return !std::isalnum(c);
+}
+
+/**
+ * \class VectorDimensionalityReduction
+ *
+ * Apply a dimensionality reduction model on a vector file
+ */
+class VectorDimensionalityReduction : public Application
+{
+public:
+    /** Standard class typedefs. */
+  typedef VectorDimensionalityReduction Self;
+  typedef Application Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Standard macro */
+  itkNewMacro(Self);
+  itkTypeMacro(VectorDimensionalityReduction, otb::Application);
+
+  /** Filters typedef */
+  typedef float                                           ValueType;
+  typedef itk::VariableLengthVector<ValueType>            InputSampleType;
+  typedef itk::Statistics::ListSample<InputSampleType>    ListSampleType;
+  typedef MachineLearningModel<
+    itk::VariableLengthVector<ValueType>,
+    itk::VariableLengthVector<ValueType> >                DimensionalityReductionModelType;
+  typedef DimensionalityReductionModelFactory<
+    ValueType,ValueType>                                  DimensionalityReductionModelFactoryType;
+  typedef DimensionalityReductionModelType::Pointer       ModelPointerType;
+
+  /** Statistics Filters typedef */
+  typedef itk::VariableLengthVector<ValueType>            MeasurementType;
+  typedef otb::StatisticsXMLFileReader<MeasurementType>   StatisticsReader;
+  typedef otb::Statistics::ShiftScaleSampleListFilter<
+    ListSampleType, ListSampleType>                       ShiftScaleFilterType;
+
+protected:
+  ~VectorDimensionalityReduction() ITK_OVERRIDE
+    {
+    DimensionalityReductionModelFactoryType::CleanFactories();
+    }
+
+private:  
+  void DoInit() ITK_OVERRIDE
+    {
+    SetName("VectorDimensionalityReduction");
+    SetDescription("Performs dimensionality reduction of the input vector data "
+      "according to a model file.");
+    SetDocName("Vector Dimensionality Reduction");
+    SetDocAuthors("OTB-Team");
+    SetDocLongDescription("This application performs a vector data "
+      "dimensionality reduction based on a model file produced by the "
+      "TrainDimensionalityReduction application.");
+    SetDocSeeAlso("TrainDimensionalityReduction");
+    SetDocLimitations("None");
+    AddDocTag(Tags::Learning);
+
+    AddParameter(ParameterType_InputVectorData, "in", "Name of the input vector data");
+    SetParameterDescription("in","The input vector data to reduce.");
+
+    AddParameter(ParameterType_InputFilename, "instat", "Statistics file");
+    SetParameterDescription("instat", "A XML file containing mean and standard "
+      "deviation to center and reduce samples before dimensionality reduction "
+      "(produced by ComputeImagesStatistics application).");
+    MandatoryOff("instat");
+
+    AddParameter(ParameterType_InputFilename, "model", "Model file");
+    SetParameterDescription("model", "A model file (produced by the "
+      "TrainDimensionalityReduction application).");
+
+    AddParameter(ParameterType_OutputFilename, "out", "Output vector data file "
+      "containing the reduced vector");
+    SetParameterDescription("out","Output vector data file storing sample "
+      "values (OGR format). If not given, the input vector data file is used. "
+      "In overwrite mode, the original features will be lost.");
+    MandatoryOff("out");
+
+    AddParameter(ParameterType_ListView, "feat", "Input features to use for reduction."); //
+    SetParameterDescription("feat","List of field names in the input vector "
+      "data used as features for reduction."); //
+
+    AddParameter(ParameterType_Choice, "featout", "Output feature"); //
+    SetParameterDescription("featout", "Naming of output features");
+
+    AddChoice("featout.prefix", "Prefix");
+    SetParameterDescription("featout.prefix", "Use a name prefix");
+
+    AddParameter(ParameterType_String, "featout.prefix.name", "Feature name prefix");
+    SetParameterDescription("featout.prefix.name","Name prefix for output "
+      "features. This prefix is followed by the numeric index of each output feature.");
+    SetParameterString("featout.prefix.name","reduced_", false);
+
+    AddChoice("featout.list","List");
+    SetParameterDescription("featout.list", "Use a list with all names");
+
+    AddParameter(ParameterType_StringList, "featout.list.names", "Feature name list");
+    SetParameterDescription("featout.list.names","List of field names for the output "
+      "features which result from the reduction."); //
+
+    AddParameter(ParameterType_Int, "pcadim", "Principal component dimension"); //
+    SetParameterDescription("pcadim","This optional parameter can be set to "
+      "reduce the number of eigenvectors used in the PCA model file. This "
+      "parameter can't be used for other models"); //
+    MandatoryOff("pcadim");
+    
+    AddParameter(ParameterType_Choice, "mode", "Writing mode"); //
+    SetParameterDescription("mode", "This parameter determines if the output "
+      "file is overwritten or updated [overwrite/update]. If an output file "
+      "name is given, the original file is copied before creating the new features.");
+
+    AddChoice("mode.overwrite", "Overwrite");
+    SetParameterDescription("mode.overwrite","Overwrite mode"); //
+
+    AddChoice("mode.update", "Update");
+    SetParameterDescription("mode.update", "Update mode");
+
+    // Doc example parameter settings
+    SetDocExampleParameterValue("in", "vectorData.shp");
+    SetDocExampleParameterValue("instat", "meanVar.xml");
+    SetDocExampleParameterValue("model", "model.txt");
+    SetDocExampleParameterValue("out", "vectorDataOut.shp");
+    SetDocExampleParameterValue("feat", "perimeter area width");
+    //SetOfficialDocLink(); 
+    }
+
+  void DoUpdateParameters() ITK_OVERRIDE
+    {
+    if ( HasValue("in") )
+      {
+      std::string shapefile = GetParameterString("in");
+      otb::ogr::DataSource::Pointer ogrDS;
+      OGRSpatialReference oSRS("");
+      std::vector<std::string> options;
+      ogrDS = otb::ogr::DataSource::New(shapefile, otb::ogr::DataSource::Modes::Read);
+      otb::ogr::Layer layer = ogrDS->GetLayer(0);
+      OGRFeatureDefn &layerDefn = layer.GetLayerDefn();
+      ClearChoices("feat");
+
+      for(int iField=0; iField< layerDefn.GetFieldCount(); iField++)
+        {
+        std::string item = layerDefn.GetFieldDefn(iField)->GetNameRef();
+        std::string key(item);
+        std::string::iterator end = std::remove_if( key.begin(), key.end(), IsNotAlphaNum );
+        std::transform( key.begin(), end, key.begin(), tolower );
+        std::string tmpKey = "feat." + key.substr( 0, static_cast<unsigned long>( end - key.begin() ) );
+        AddChoice(tmpKey,item);
+        }
+      }
+    }
+
+  void DoExecute() ITK_OVERRIDE
+    {
+    clock_t tic = clock();
+
+    std::string shapefile = GetParameterString("in");
+    otb::ogr::DataSource::Pointer source = otb::ogr::DataSource::New(
+      shapefile, otb::ogr::DataSource::Modes::Read);
+    otb::ogr::Layer layer = source->GetLayer(0);
+    ListSampleType::Pointer input = ListSampleType::New();
+    std::vector<int> inputIndexes = GetSelectedItems("feat");
+    int nbFeatures = inputIndexes.size();
+
+    input->SetMeasurementVectorSize(nbFeatures);
+    otb::ogr::Layer::const_iterator it = layer.cbegin();
+    otb::ogr::Layer::const_iterator itEnd = layer.cend();
+
+    // Get the list of non-selected field indexes
+    // /!\ The 'feat' is assumed to expose all available fields, hence the
+    // mapping between GetSelectedItems() and OGR field indexes
+    OGRFeatureDefn &inLayerDefn = layer.GetLayerDefn();
+    std::set<int> otherInputFields;
+    for (int i=0 ; i < inLayerDefn.GetFieldCount() ; i++)
+      otherInputFields.insert(i);
+    for (int k=0 ; k < nbFeatures ; k++)
+      otherInputFields.erase(inputIndexes[k]);
+
+    for( ; it!=itEnd ; ++it)
+      {
+      MeasurementType mv;
+      mv.SetSize(nbFeatures);
+      
+      for(int idx=0; idx < nbFeatures; ++idx)
+        {
+        mv[idx] = static_cast<float>( (*it)[inputIndexes[idx]].GetValue<double>() );
+        }
+      input->PushBack(mv);
+      }
+
+    /** Statistics for shift/scale */    
+    MeasurementType meanMeasurementVector;
+    MeasurementType stddevMeasurementVector;
+
+    if (HasValue("instat") && IsParameterEnabled("instat"))
+      {
+      StatisticsReader::Pointer statisticsReader = StatisticsReader::New();
+      std::string XMLfile = GetParameterString("instat");
+      statisticsReader->SetFileName(XMLfile);
+      meanMeasurementVector = statisticsReader->GetStatisticVectorByName("mean");
+      stddevMeasurementVector = statisticsReader->GetStatisticVectorByName("stddev");
+      otbAppLogINFO("Mean used: " << meanMeasurementVector);
+      otbAppLogINFO("Standard deviation used: " << stddevMeasurementVector);
+      }
+    else
+      {
+      meanMeasurementVector.SetSize(nbFeatures);
+      meanMeasurementVector.Fill(0.);
+      stddevMeasurementVector.SetSize(nbFeatures);
+      stddevMeasurementVector.Fill(1.);
+      }
+
+    ShiftScaleFilterType::Pointer trainingShiftScaleFilter = ShiftScaleFilterType::New();
+    trainingShiftScaleFilter->SetInput(input);
+    trainingShiftScaleFilter->SetShifts(meanMeasurementVector);
+    trainingShiftScaleFilter->SetScales(stddevMeasurementVector);
+    trainingShiftScaleFilter->Update();
+
+    otbAppLogINFO("Loading model");
+    /** Read the model */
+    m_Model = DimensionalityReductionModelFactoryType::CreateDimensionalityReductionModel(
+      GetParameterString("model"),
+      DimensionalityReductionModelFactoryType::ReadMode);
+    if (m_Model.IsNull())
+      {
+      otbAppLogFATAL(<< "Error when loading model " << GetParameterString("model")
+        << " : unsupported model type");
+      }
+    m_Model->Load(GetParameterString("model"));
+    if (HasValue("pcadim") && IsParameterEnabled("pcadim"))
+      {
+      std::string modelName(m_Model->GetNameOfClass());
+      if (modelName != "PCAModel")
+        {
+        otbAppLogFATAL(<< "Can't set 'pcadim' on a model : "<< modelName);
+        }
+      m_Model->SetDimension( GetParameterInt("pcadim") );
+      }
+    otbAppLogINFO("Model loaded, dimension : "<< m_Model->GetDimension());
+
+    /** Perform Dimensionality Reduction */    
+    ListSampleType::Pointer listSample = trainingShiftScaleFilter->GetOutput();
+    ListSampleType::Pointer target = m_Model->PredictBatch(listSample);
+
+    /** Create/Update Output Shape file */          
+    ogr::DataSource::Pointer output;
+    ogr::DataSource::Pointer buffer = ogr::DataSource::New();
+    bool updateMode = false;
+
+    if (IsParameterEnabled("out") && HasValue("out"))
+      {
+      // Create new OGRDataSource
+      if (GetParameterString("mode")=="overwrite")
+        {
+        output = ogr::DataSource::New(GetParameterString("out"), ogr::DataSource::Modes::Overwrite);
+        otb::ogr::Layer newLayer = output->CreateLayer(
+          GetParameterString("out"),
+          const_cast<OGRSpatialReference*>(layer.GetSpatialRef()),
+          layer.GetGeomType());
+        // Copy existing fields except the ones selected for reduction
+        for (const int& k : otherInputFields)
+          {
+          OGRFieldDefn fieldDefn(inLayerDefn.GetFieldDefn(k));
+          newLayer.CreateField(fieldDefn);
+          }
+        }
+      else if (GetParameterString("mode")=="update")
+        {
+        //output = ogr::DataSource::New(GetParameterString("out"), ogr::DataSource::Modes::Update_LayerCreateOnly);
+        // Update mode
+        otb::ogr::DataSource::Pointer source_output =
+          otb::ogr::DataSource::New(GetParameterString("out"), otb::ogr::DataSource::Modes::Read);
+        layer = source_output->GetLayer(0);
+        updateMode = true;
+        otbAppLogINFO("Update input vector data.");
+
+        // fill temporary buffer for the transfer
+        otb::ogr::Layer inputLayer = layer;
+        layer = buffer->CopyLayer(inputLayer, std::string("Buffer"));
+        // close input data source 
+        source_output->Clear();
+        // Re-open input data source in update mode
+        output = otb::ogr::DataSource::New(
+          GetParameterString("out"),
+          otb::ogr::DataSource::Modes::Update_LayerUpdate);
+        }
+      else
+        {
+        otbAppLogFATAL(<< "Error when creating the output file" <<
+          GetParameterString("mode") << " : unsupported writting mode type");
+        }
+      }
+
+    otb::ogr::Layer outLayer = output->GetLayer(0);    
+    OGRErr errStart = outLayer.ogr().StartTransaction();
+
+    if (errStart != OGRERR_NONE)
+      {
+      otbAppLogFATAL(<< "Unable to start transaction for OGR layer " << outLayer.ogr().GetName() << ".");
+      }
+
+    // Build the list of output fields
+    std::vector<std::string> outFields;
+    if(GetParameterString("featout") == "prefix")
+      {
+      std::string prefix = GetParameterString("featout.prefix.name");
+      std::ostringstream oss;
+      for (unsigned int i=0 ; i < m_Model->GetDimension() ; i++)
+        {
+        oss.str(prefix);
+        oss.seekp(0,std::ios_base::end);
+        oss << i;
+        outFields.push_back(oss.str());
+        }
+      }
+    else if(GetParameterString("featout") == "list")
+      {
+      outFields = GetParameterStringList("featout.list.names");
+      if (outFields.size() != m_Model->GetDimension())
+        {
+        otbAppLogFATAL( << "Wrong number of output field names, expected "
+          << m_Model->GetDimension() << " , got "<< outFields.size());
+        }
+      }
+    else
+      {
+      otbAppLogFATAL( << "Unsupported output feature mode : "
+        << GetParameterString("featout"));
+      }
+
+    // Add the field of prediction in the output layer if field not exist
+    for (unsigned int i=0; i<outFields.size() ;i++)
+      {
+      OGRFeatureDefn &layerDefn = outLayer.GetLayerDefn();
+      int idx = layerDefn.GetFieldIndex(outFields[i].c_str());
+      
+      if (idx >= 0)
+        {
+        if (layerDefn.GetFieldDefn(idx)->GetType() != OFTReal)
+        otbAppLogFATAL("Field name "<< outFields[i]
+          << " already exists with a different type!");
+        }
+      else
+        {
+        OGRFieldDefn predictedField(outFields[i].c_str(), OFTReal);
+        ogr::FieldDefn predictedFieldDef(predictedField);
+        outLayer.CreateField(predictedFieldDef);
+        }
+      }
+
+    // Fill output layer
+    unsigned int count=0;
+    it = layer.cbegin();
+    itEnd = layer.cend();
+    for( ; it!=itEnd ; ++it, ++count)
+      {
+      ogr::Feature dstFeature(outLayer.GetLayerDefn());
+
+      dstFeature.SetFrom( *it , TRUE);
+      dstFeature.SetFID(it->GetFID());
+
+      for (std::size_t i=0; i<outFields.size(); ++i)
+        {
+        dstFeature[outFields[i]].SetValue<double>(target->GetMeasurementVector(count)[i]);
+        }
+      if (updateMode)
+        {
+        outLayer.SetFeature(dstFeature);
+        }
+      else
+        {
+        outLayer.CreateFeature(dstFeature);
+        }
+      }
+
+    if(outLayer.ogr().TestCapability("Transactions"))
+      {
+      const OGRErr errCommitX = outLayer.ogr().CommitTransaction();
+      if (errCommitX != OGRERR_NONE)
+        {
+        otbAppLogFATAL(<< "Unable to commit transaction for OGR layer " <<
+          outLayer.ogr().GetName() << ".");
+        }
+      }
+    output->SyncToDisk();
+    clock_t toc = clock();
+    otbAppLogINFO( "Elapsed: "<< ((double)(toc - tic) / CLOCKS_PER_SEC)<<" seconds.");
+    }
+
+  ModelPointerType m_Model;
+};
+
+} // end of namespace Wrapper
+} // end of namespace otb
+
+OTB_APPLICATION_EXPORT(otb::Wrapper::VectorDimensionalityReduction)
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
new file mode 100644
index 0000000000000000000000000000000000000000..f474167e38be7fe6f2a8e56fc8838811afec789b
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainAutoencoder.txx
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbDimensionalityReductionTrainAutoencoder_txx
+#define otbDimensionalityReductionTrainAutoencoder_txx
+
+#include "otbTrainDimensionalityReductionApplicationBase.h"
+#include "otbAutoencoderModel.h"
+
+namespace otb
+{
+namespace Wrapper
+{
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::InitAutoencoderParams()
+{
+  // Declare the "autoencoder" algorithm choice and all its parameters.
+  AddChoice("algorithm.autoencoder", "Shark Autoencoder");
+  SetParameterDescription("algorithm.autoencoder",
+                          "This group of parameters allows setting Shark autoencoder parameters. "
+                          );
+
+  //Number Of Iterations
+  AddParameter(ParameterType_Int, "algorithm.autoencoder.nbiter",
+               "Maximum number of iterations during training");
+  SetParameterInt("algorithm.autoencoder.nbiter",100, false);
+  SetParameterDescription(
+    "algorithm.autoencoder.nbiter",
+    "The maximum number of iterations used during training.");
+
+  // Fixed copy-pasted title: this parameter controls the fine-tuning pass,
+  // not the regular training pass.
+  AddParameter(ParameterType_Int, "algorithm.autoencoder.nbiterfinetuning",
+               "Maximum number of iterations during fine tuning");
+  SetParameterInt("algorithm.autoencoder.nbiterfinetuning",0, false);
+  SetParameterDescription(
+    "algorithm.autoencoder.nbiterfinetuning",
+    "The maximum number of iterations used during fine tuning of the whole network.");
+
+  // NOTE(review): exact semantics of 'epsilon' are defined by the Shark
+  // trainer (presumably a stopping threshold) — confirm before expanding
+  // this description.
+  AddParameter(ParameterType_Float, "algorithm.autoencoder.epsilon",
+               "Epsilon");
+  SetParameterFloat("algorithm.autoencoder.epsilon",0, false);
+  SetParameterDescription(
+    "algorithm.autoencoder.epsilon",
+    "Epsilon");
+
+  AddParameter(ParameterType_Float, "algorithm.autoencoder.initfactor",
+               "Weight initialization factor");
+  SetParameterFloat("algorithm.autoencoder.initfactor",1, false);
+  SetParameterDescription(
+    "algorithm.autoencoder.initfactor", "Parameter that control the weight initialization of the autoencoder");
+
+  // Number of hidden neurons: one value per hidden layer. The per-layer
+  // parameters below must all have the same number of values.
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.nbneuron", "Size");
+  SetParameterDescription(
+    "algorithm.autoencoder.nbneuron",
+    "The number of neurons in each hidden layer.");
+
+  //Regularization (one value per hidden layer)
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.regularization", "Strength of the regularization");
+  SetParameterDescription("algorithm.autoencoder.regularization",
+                          "Strength of the L2 regularization used during training");
+
+  //Noise strength (one value per hidden layer)
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.noise", "Strength of the noise");
+  SetParameterDescription("algorithm.autoencoder.noise",
+                          "Strength of the noise");
+
+  // Sparsity parameter (one value per hidden layer)
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.rho", "Sparsity parameter");
+  SetParameterDescription("algorithm.autoencoder.rho",
+                          "Sparsity parameter");
+
+  // Sparsity regularization strength (one value per hidden layer)
+  AddParameter(ParameterType_StringList, "algorithm.autoencoder.beta", "Sparsity regularization strength");
+  SetParameterDescription("algorithm.autoencoder.beta",
+                          "Sparsity regularization strength");
+
+  // Optional output file receiving the learning error values
+  AddParameter(ParameterType_OutputFilename, "algorithm.autoencoder.learningcurve", "Learning curve");
+  SetParameterDescription("algorithm.autoencoder.learningcurve", "Learning error values");
+  MandatoryOff("algorithm.autoencoder.learningcurve");
+}
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::BeforeTrainAutoencoder(typename ListSampleType::Pointer trainingListSample,
+                         std::string modelPath)
+{
+  // Bind the concrete autoencoder model type (logistic activation) and
+  // forward to the templated training routine.
+  typedef shark::LogisticNeuron NeuronType;
+  typedef otb::AutoencoderModel<InputValueType, NeuronType> AutoencoderModelType;
+  TrainAutoencoder<AutoencoderModelType>(trainingListSample,modelPath);
+}
+
+template <class TInputValue, class TOutputValue>
+template <typename autoencoderchoice>
+void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::TrainAutoencoder(typename ListSampleType::Pointer trainingListSample,std::string modelPath)
+{
+  typename autoencoderchoice::Pointer dimredTrainer = autoencoderchoice::New();
+
+  // Per-layer parameters: the number of hidden layers is given by the
+  // 'nbneuron' list; every other list must provide one value per layer.
+  std::vector<std::string> s_nbneuron = GetParameterStringList("algorithm.autoencoder.nbneuron");
+  std::vector<std::string> s_noise = GetParameterStringList("algorithm.autoencoder.noise");
+  std::vector<std::string> s_regularization = GetParameterStringList("algorithm.autoencoder.regularization");
+  std::vector<std::string> s_rho = GetParameterStringList("algorithm.autoencoder.rho");
+  std::vector<std::string> s_beta = GetParameterStringList("algorithm.autoencoder.beta");
+
+  const std::size_t nbLayers = s_nbneuron.size();
+  // Fail early with a clear message instead of reading past the end of a
+  // shorter list (previous code indexed all lists by the 'nbneuron' size
+  // without checking their lengths).
+  if (s_noise.size() != nbLayers ||
+      s_regularization.size() != nbLayers ||
+      s_rho.size() != nbLayers ||
+      s_beta.size() != nbLayers)
+    {
+    otbAppLogFATAL(<< "The 'noise', 'regularization', 'rho' and 'beta' lists "
+      "must have as many values as 'nbneuron' (" << nbLayers << ")");
+    }
+
+  itk::Array<unsigned int> nb_neuron;
+  itk::Array<float> noise;
+  itk::Array<float> regularization;
+  itk::Array<float> rho;
+  itk::Array<float> beta;
+  nb_neuron.SetSize(nbLayers);
+  noise.SetSize(nbLayers);
+  regularization.SetSize(nbLayers);
+  rho.SetSize(nbLayers);
+  beta.SetSize(nbLayers);
+  for (std::size_t i = 0; i < nbLayers; i++)
+    {
+    // std::stoi/std::stof throw std::invalid_argument on malformed input
+    nb_neuron[i] = std::stoi(s_nbneuron[i]);
+    noise[i] = std::stof(s_noise[i]);
+    regularization[i] = std::stof(s_regularization[i]);
+    rho[i] = std::stof(s_rho[i]);
+    beta[i] = std::stof(s_beta[i]);
+    }
+
+  dimredTrainer->SetNumberOfHiddenNeurons(nb_neuron);
+  dimredTrainer->SetNumberOfIterations(GetParameterInt("algorithm.autoencoder.nbiter"));
+  dimredTrainer->SetNumberOfIterationsFineTuning(GetParameterInt("algorithm.autoencoder.nbiterfinetuning"));
+  dimredTrainer->SetEpsilon(GetParameterFloat("algorithm.autoencoder.epsilon"));
+  dimredTrainer->SetInitFactor(GetParameterFloat("algorithm.autoencoder.initfactor"));
+  dimredTrainer->SetRegularization(regularization);
+  dimredTrainer->SetNoise(noise);
+  dimredTrainer->SetRho(rho);
+  dimredTrainer->SetBeta(beta);
+  dimredTrainer->SetWriteWeights(true);
+
+  // Optional dump of the learning error values to a file
+  if (HasValue("algorithm.autoencoder.learningcurve") &&
+      IsParameterEnabled("algorithm.autoencoder.learningcurve"))
+    {
+    dimredTrainer->SetWriteLearningCurve(true);
+    dimredTrainer->SetLearningCurveFileName(GetParameterString("algorithm.autoencoder.learningcurve"));
+    }
+
+  dimredTrainer->SetInputListSample(trainingListSample);
+  dimredTrainer->Train();
+  dimredTrainer->Save(modelPath);
+}
+
+} //end namespace wrapper
+} //end namespace otb
+
+#endif
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainPCA.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainPCA.txx
new file mode 100644
index 0000000000000000000000000000000000000000..03016916cb0186d118f57a95ee726b80f69486d5
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainPCA.txx
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbDimensionalityReductionTrainPCA_txx
+#define otbDimensionalityReductionTrainPCA_txx
+
+#include "otbTrainDimensionalityReductionApplicationBase.h"
+#include "otbPCAModel.h"
+
+namespace otb
+{
+namespace Wrapper
+{
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::InitPCAParams()
+{
+  // Declare the "pca" algorithm choice and its single parameter.
+  AddChoice("algorithm.pca", "Shark PCA");
+  SetParameterDescription("algorithm.pca",
+                          "This group of parameters allows setting Shark PCA parameters. "
+                          );
+
+  // Output dimension (number of components kept), defaults to 10
+  AddParameter(ParameterType_Int, "algorithm.pca.dim",
+               "Dimension of the output of the pca transformation");
+  SetParameterInt("algorithm.pca.dim",10, false);
+  SetParameterDescription(
+    "algorithm.pca.dim",
+    "Dimension of the output of the pca transformation.");
+}
+
+template <class TInputValue, class TOutputValue>
+void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::TrainPCA(typename ListSampleType::Pointer trainingListSample,std::string modelPath)
+{
+  // Instantiate a Shark PCA model, configure it from the application
+  // parameters, train it on the given sample list and serialize it.
+  typedef otb::PCAModel<InputValueType> PCAModelType;
+  typename PCAModelType::Pointer pcaModel = PCAModelType::New();
+
+  pcaModel->SetDimension(GetParameterInt("algorithm.pca.dim"));
+  pcaModel->SetWriteEigenvectors(true);
+  pcaModel->SetInputListSample(trainingListSample);
+  pcaModel->Train();
+  pcaModel->Save(modelPath);
+}
+
+} //end namespace wrapper
+} //end namespace otb
+
+#endif
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
new file mode 100644
index 0000000000000000000000000000000000000000..51cdd9e1acf1ededba9b54f93260e10c16962572
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbDimensionalityReductionTrainSOM.txx
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbDimensionalityReductionTrainSOM_txx
+#define otbDimensionalityReductionTrainSOM_txx
+#include "otbTrainDimensionalityReductionApplicationBase.h"
+#include "otbSOMModel.h"
+
+namespace otb
+{
+namespace Wrapper
+{
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::InitSOMParams()
+{
+  // Declare the "som" algorithm choice and all its parameters.
+  AddChoice("algorithm.som", "OTB SOM");
+  SetParameterDescription("algorithm.som",
+    "This group of parameters allows setting SOM parameters. ");
+
+  // Fixed missing space in the help text: adjacent literals previously
+  // rendered as "Support2D to 5D maps."
+  AddParameter(ParameterType_StringList , "algorithm.som.s", "Map size");
+  SetParameterDescription("algorithm.som.s", "Sizes of the SOM map (one per "
+    "dimension). For instance, [12;15] means a 2D map of size 12x15. Supports "
+    "2D to 5D maps.");
+  MandatoryOff("algorithm.som.s");
+
+  AddParameter(ParameterType_StringList , "algorithm.som.n", "Neighborhood sizes");
+  SetParameterDescription("algorithm.som.n", "Sizes of the initial neighborhood "
+    "in the SOM map (one per dimension). The number of sizes should be the same"
+    " as the map sizes");
+  MandatoryOff("algorithm.som.n");
+
+  AddParameter(ParameterType_Int, "algorithm.som.ni", "NumberIteration");
+  SetParameterDescription("algorithm.som.ni", "Number of iterations for SOM learning");
+  MandatoryOff("algorithm.som.ni");
+
+  AddParameter(ParameterType_Float, "algorithm.som.bi", "BetaInit");
+  SetParameterDescription("algorithm.som.bi", "Initial learning coefficient");
+  MandatoryOff("algorithm.som.bi");
+
+  AddParameter(ParameterType_Float, "algorithm.som.bf", "BetaFinal");
+  SetParameterDescription("algorithm.som.bf", "Final learning coefficient");
+  MandatoryOff("algorithm.som.bf");
+
+  AddParameter(ParameterType_Float, "algorithm.som.iv", "InitialValue");
+  SetParameterDescription("algorithm.som.iv", "Maximum initial neuron weight");
+  MandatoryOff("algorithm.som.iv");
+
+  // Default map: 2D, 10x10 cells with a 3x3 initial neighborhood; kept
+  // disabled so the user can tell defaults from explicit values.
+  std::vector<std::string> size(2, std::string("10"));
+  std::vector<std::string> radius(2, std::string("3"));
+  SetParameterStringList("algorithm.som.s", size, false);
+  SetParameterStringList("algorithm.som.n", radius, false);
+  DisableParameter("algorithm.som.s");
+  DisableParameter("algorithm.som.n");
+
+  SetDefaultParameterInt("algorithm.som.ni", 5);
+  SetDefaultParameterFloat("algorithm.som.bi", 1.0);
+  SetDefaultParameterFloat("algorithm.som.bf", 0.1);
+  SetDefaultParameterFloat("algorithm.som.iv", 10.0);
+}
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::BeforeTrainSOM(typename ListSampleType::Pointer trainingListSample,
+        std::string modelPath)
+{
+  // The map dimension is the number of sizes supplied by the user; the SOM
+  // model type is templated on that dimension, hence the explicit dispatch.
+  std::vector<std::string> mapSizes = GetParameterStringList("algorithm.som.s");
+  const int somDim = static_cast<int>(mapSizes.size());
+
+  switch (somDim)
+    {
+    case 2:
+      {
+      typedef otb::SOMModel<InputValueType, 2> SOM2DModelType;
+      TrainSOM<SOM2DModelType>(trainingListSample, modelPath);
+      break;
+      }
+    case 3:
+      {
+      typedef otb::SOMModel<InputValueType, 3> SOM3DModelType;
+      TrainSOM<SOM3DModelType>(trainingListSample, modelPath);
+      break;
+      }
+    case 4:
+      {
+      typedef otb::SOMModel<InputValueType, 4> SOM4DModelType;
+      TrainSOM<SOM4DModelType>(trainingListSample, modelPath);
+      break;
+      }
+    case 5:
+      {
+      typedef otb::SOMModel<InputValueType, 5> SOM5DModelType;
+      TrainSOM<SOM5DModelType>(trainingListSample, modelPath);
+      break;
+      }
+    default:
+      otbAppLogFATAL(<< "Invalid number of dimensions : " << somDim <<
+        ". Only support 2, 3, 4 or 5 dimensions");
+    }
+}
+
+template <class TInputValue, class TOutputValue>
+template <typename TSOM>
+void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::TrainSOM(typename ListSampleType::Pointer trainingListSample,std::string modelPath)
+{
+  // Configure a SOM model of the dimension carried by TSOM, train it on the
+  // sample list and serialize it to modelPath.
+  typename TSOM::Pointer dimredTrainer = TSOM::New();
+  dimredTrainer->SetNumberOfIterations(GetParameterInt("algorithm.som.ni"));
+  dimredTrainer->SetBetaInit(GetParameterFloat("algorithm.som.bi"));
+  dimredTrainer->SetWriteMap(true);
+  dimredTrainer->SetBetaEnd(GetParameterFloat("algorithm.som.bf"));
+  dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
+  // Map size: one entry per dimension. The caller (BeforeTrainSOM) selects
+  // TSOM from the size of this list, so it matches TSOM::SizeType here.
+  typename TSOM::SizeType size;
+  std::vector<std::string> s = GetParameterStringList("algorithm.som.s");
+  for (unsigned int i=0; i<s.size(); i++)
+    {
+    // boost::lexical_cast throws bad_lexical_cast on malformed input
+    size[i]=boost::lexical_cast<unsigned int>(s[i]);
+    }
+
+  dimredTrainer->SetMapSize(size);
+  // Initial neighborhood radii: must have the same number of entries as the
+  // map sizes; checked before the array is filled.
+  typename TSOM::SizeType radius;
+  std::vector<std::string> n = GetParameterStringList("algorithm.som.n");
+  if (n.size() != s.size())
+    {
+    otbAppLogFATAL(<< "Wrong number of neighborhood radii : expected "<< s.size() << " ; got "<< n.size());
+    }
+  for (unsigned int i=0; i < n.size(); i++)
+    {
+    radius[i]=boost::lexical_cast<unsigned int>(n[i]);
+    }
+  dimredTrainer->SetNeighborhoodSizeInit(radius);
+  dimredTrainer->SetInputListSample(trainingListSample);
+  dimredTrainer->Train();
+  dimredTrainer->Save(modelPath);
+}
+
+} //end namespace wrapper
+} //end namespace otb
+
+#endif
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.h b/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.h
new file mode 100644
index 0000000000000000000000000000000000000000..b0a2ae847ac0595c1744c73117a9c4d711bc93a9
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbTrainDimensionalityReductionApplicationBase_h
+#define otbTrainDimensionalityReductionApplicationBase_h
+
+#include "otbConfigure.h"
+#include "otbWrapperApplication.h"
+#include "otbDimensionalityReductionModelFactory.h"
+
+// ListSample
+#include "itkListSample.h"
+#include "itkVariableLengthVector.h"
+
+#include <iostream>
+
+namespace otb
+{
+namespace Wrapper
+{
+
+/** \class TrainDimensionalityReductionApplicationBase
+ *  \brief Base class for applications that train a dimensionality
+ *         reduction model.
+ *
+ * This base class offers a DoInit() method to initialize all the parameters
+ * related to dimensionality reduction models. They will all be in the choice
+ * parameter named "algorithm". The class also offers generic Train() and
+ * Reduce() methods. The classes derived from
+ * TrainDimensionalityReductionApplicationBase only need these methods to
+ * handle the dimensionality reduction model.
+ *
+ * The available models are an OTB self-organizing map (SOM) and, when OTB
+ * is built with Shark support, a Shark PCA and a Shark autoencoder. They
+ * all have different parameters. The purpose of this class is to handle the
+ * creation of all parameters related to these models (in DoInit()), and to
+ * dispatch the calls to the specific train functions in Train().
+ *
+ * This class is templated over scalar types for input and output values,
+ * typically float or double.
+ *
+ * \ingroup OTBAppDimensionalityReduction
+ */
+template <class TInputValue, class TOutputValue>
+class TrainDimensionalityReductionApplicationBase: public Application
+{
+public:
+  /** Standard class typedefs. */
+  typedef TrainDimensionalityReductionApplicationBase Self;
+  typedef Application             Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Standard macro */
+  itkTypeMacro(TrainDimensionalityReductionApplicationBase, otb::Application)
+
+  typedef TInputValue                             InputValueType;
+  typedef TOutputValue                            OutputValueType;
+
+  typedef otb::VectorImage<InputValueType>        SampleImageType;
+  typedef typename SampleImageType::PixelType     PixelType;
+
+  /** Factory providing the dimensionality reduction model types. */
+  typedef otb::DimensionalityReductionModelFactory<
+    InputValueType, OutputValueType>             ModelFactoryType;
+  typedef typename ModelFactoryType::DimensionalityReductionModelTypePointer ModelPointerType;
+  typedef typename ModelFactoryType::DimensionalityReductionModelType        ModelType;
+
+  typedef typename ModelType::InputSampleType     SampleType;
+  typedef typename ModelType::InputListSampleType ListSampleType;
+  
+protected:
+  TrainDimensionalityReductionApplicationBase();
+  ~TrainDimensionalityReductionApplicationBase() override;
+
+  /** Generic method to train and save the dimensionality reduction model.
+     * Dispatches to a specific train method depending on the chosen model.*/
+  void Train(typename ListSampleType::Pointer trainingListSample,
+             std::string modelPath);
+
+  /** Generic method to load a model file and use it to reduce a sample list */
+  void Reduce(typename ListSampleType::Pointer validationListSample,
+              std::string modelPath);
+
+  /** Init method that creates all the parameters for the available models */
+  void DoInit() override;
+
+private:
+
+  /** Specific Init and Train methods for each dimensionality reduction model */
+
+  void InitSOMParams();
+  template <class somchoice>
+  void TrainSOM(typename ListSampleType::Pointer trainingListSample, std::string modelPath);
+  void BeforeTrainSOM(typename ListSampleType::Pointer trainingListSample, std::string modelPath);
+
+#ifdef OTB_USE_SHARK
+  void InitAutoencoderParams();
+  void InitPCAParams();
+  
+  void BeforeTrainAutoencoder(typename ListSampleType::Pointer trainingListSample, std::string modelPath);
+  template <class autoencoderchoice>
+  void TrainAutoencoder(typename ListSampleType::Pointer trainingListSample, std::string modelPath);
+  
+  void TrainPCA(typename ListSampleType::Pointer trainingListSample, std::string modelPath);
+#endif
+};
+
+} // end of namespace Wrapper
+} // end of namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbTrainDimensionalityReductionApplicationBase.txx"
+#include "otbDimensionalityReductionTrainSOM.txx"
+
+#ifdef OTB_USE_SHARK
+#include "otbDimensionalityReductionTrainAutoencoder.txx"
+#include "otbDimensionalityReductionTrainPCA.txx"
+#endif
+#endif
+
+#endif
diff --git a/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.txx b/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.txx
new file mode 100644
index 0000000000000000000000000000000000000000..057541017693139268c1039820dc3febde93562b
--- /dev/null
+++ b/Modules/Applications/AppDimensionalityReduction/include/otbTrainDimensionalityReductionApplicationBase.txx
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbTrainDimensionalityReductionApplicationBase_txx
+#define otbTrainDimensionalityReductionApplicationBase_txx
+
+#include "otbTrainDimensionalityReductionApplicationBase.h"
+
+namespace otb
+{
+namespace Wrapper
+{
+
+/** Default constructor: no state to initialize. */
+template <class TInputValue, class TOutputValue>
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::TrainDimensionalityReductionApplicationBase() 
+{
+}
+
+/** Destructor: releases the model factories registered by this class. */
+template <class TInputValue, class TOutputValue>
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::~TrainDimensionalityReductionApplicationBase()
+{
+  ModelFactoryType::CleanFactories();
+}
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::DoInit()
+{
+  AddDocTag(Tags::Learning);
+
+  // main choice parameter that will contain all dimensionality reduction options
+  AddParameter(ParameterType_Choice, "algorithm", "algorithm to use for the training");
+  SetParameterDescription("algorithm", "Choice of the dimensionality reduction "
+    "algorithm to use for the training.");
+
+  // SOM is always available; Shark-based algorithms only when OTB is built
+  // with Shark support.
+  InitSOMParams();
+  
+#ifdef OTB_USE_SHARK
+  InitAutoencoderParams();
+  InitPCAParams();
+#endif
+  
+}
+
+// Intentionally empty: reduction is not performed by this base class.
+// NOTE(review): either implement or document in the header why this stub
+// exists — confirm with the derived applications.
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::Reduce(typename ListSampleType::Pointer /*validationListSample*/,std::string /*modelPath*/)
+{
+}
+
+template <class TInputValue, class TOutputValue>
+void
+TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
+::Train(
+  typename ListSampleType::Pointer trainingListSample,
+  std::string modelPath)
+{
+  // Dispatch on the algorithm chosen through the "algorithm" choice
+  // parameter; the branches are mutually exclusive.
+  const std::string algorithmName = GetParameterString("algorithm");
+
+  if (algorithmName == "som")
+    {
+    BeforeTrainSOM(trainingListSample, modelPath);
+    }
+  else if (algorithmName == "autoencoder")
+    {
+#ifdef OTB_USE_SHARK
+    BeforeTrainAutoencoder(trainingListSample, modelPath);
+#else
+    otbAppLogFATAL("Module SharkLearning is not installed. You should consider turning OTB_USE_SHARK on during cmake configuration.");
+#endif
+    }
+  else if (algorithmName == "pca")
+    {
+#ifdef OTB_USE_SHARK
+    TrainPCA(trainingListSample, modelPath);
+#else
+    otbAppLogFATAL("Module SharkLearning is not installed. You should consider turning OTB_USE_SHARK on during cmake configuration.");
+#endif
+    }
+}
+
+} // end of namespace Wrapper
+} // end of namespace otb
+
+#endif
diff --git a/Modules/Applications/AppDimensionalityReduction/otb-module.cmake b/Modules/Applications/AppDimensionalityReduction/otb-module.cmake
index 2ee3b794a6d9741a7b7cb1a5ed15b6620702a77e..4b1a936de279cec5f16b1e9a7eb4c7839dc17379 100644
--- a/Modules/Applications/AppDimensionalityReduction/otb-module.cmake
+++ b/Modules/Applications/AppDimensionalityReduction/otb-module.cmake
@@ -25,9 +25,11 @@ otb_module(OTBAppDimensionalityReduction
   DEPENDS
     OTBImageManipulation
     OTBStatistics
+    OTBIOXML
     OTBApplicationEngine
     OTBDimensionalityReduction
-  TEST_DEPENDS
+    OTBDimensionalityReductionLearning
+  TEST_DEPENDS
     OTBTestKernel
     OTBCommandLine
   
diff --git a/Modules/Applications/AppDimensionalityReduction/test/CMakeLists.txt b/Modules/Applications/AppDimensionalityReduction/test/CMakeLists.txt
index 78737f6b82196796969f11363608af24814f5a62..0ec91c997978405bee777e5c9b230cbc9f977350 100644
--- a/Modules/Applications/AppDimensionalityReduction/test/CMakeLists.txt
+++ b/Modules/Applications/AppDimensionalityReduction/test/CMakeLists.txt
@@ -38,3 +38,58 @@ otb_test_application(NAME   apTvFEDimensionalityReductionPCA
                              ${BASELINE}/bfTvPCAImageFilter3.tif
                              ${TEMP}/apTvChDimensionalityReductionPCA.tif)
 
+#-------------------------------------------------------------------------------
+# One train / image-reduce / vector-reduce test triplet is generated per
+# algorithm listed in 'algos'; <algo>_params holds the command-line arguments
+# specific to each algorithm.
+set(algos ae pca som)
+
+# Autoencoder: one hidden layer of 8 neurons, no noise/sparsity regularization.
+set(ae_params
+-algorithm autoencoder
+-algorithm.autoencoder.nbneuron 8
+-algorithm.autoencoder.regularization 0.01
+-algorithm.autoencoder.noise 0
+-algorithm.autoencoder.rho 0
+-algorithm.autoencoder.beta 0)
+
+# PCA: keep 8 principal components.
+set(pca_params
+-algorithm pca
+-algorithm.pca.dim 8)
+
+# SOM: 10x10 map, 3x3 neighborhood, 10 iterations.
+set(som_params
+-algorithm som
+-algorithm.som.s 10 10
+-algorithm.som.n 3 3
+-algorithm.som.ni 10)
+
+foreach(algo ${algos})
+  string(TOUPPER ${algo} ualgo)
+  #------------------ TrainDimensionalityReduction TESTS------------------------
+  otb_test_application(NAME apTvDrTrainDimensionalityReduction${ualgo}
+    APP TrainDimensionalityReduction
+    OPTIONS -io.vd ${INPUTDATA}/cuprite_samples.sqlite
+            -io.out ${TEMP}/cuprite_DRModel.${algo}
+            -io.stats ${INPUTDATA}/cupriteStats.xml
+            -feat value_0 value_1 value_2 value_3 value_4 value_5 value_6 value_7 value_8 value_9
+            ${${algo}_params})
+
+  #------------------ ImageDimensionalityReduction TESTS------------------------
+  # Reads the model produced by the train test above, hence the DEPENDS below.
+  otb_test_application(NAME apTvDrImageDimensionalityReduction${ualgo}
+    APP ImageDimensionalityReduction
+    OPTIONS -in ${INPUTDATA}/cupriteSubHsi.tif
+            -model ${TEMP}/cuprite_DRModel.${algo}
+            -imstat ${INPUTDATA}/cupriteStats.xml
+            -out ${TEMP}/cupriteReduced_${algo}.tif)
+
+  set_tests_properties( apTvDrImageDimensionalityReduction${ualgo}
+    PROPERTIES DEPENDS apTvDrTrainDimensionalityReduction${ualgo})
+
+  #------------------ VectorDimensionalityReduction TESTS-----------------------
+  # Also consumes the trained model, so it depends on the train test too.
+  otb_test_application(NAME apTvDrVectorDimensionalityReduction${ualgo}
+    APP VectorDimensionalityReduction
+    OPTIONS -in ${INPUTDATA}/cuprite_samples.sqlite
+            -model ${TEMP}/cuprite_DRModel.${algo}
+            -instat ${INPUTDATA}/cupriteStats.xml
+            -out ${TEMP}/cupriteReduced_${algo}.sqlite
+            -feat value_0 value_1 value_2 value_3 value_4 value_5 value_6 value_7 value_8 value_9)
+
+  set_tests_properties( apTvDrVectorDimensionalityReduction${ualgo}
+    PROPERTIES DEPENDS apTvDrTrainDimensionalityReduction${ualgo})
+endforeach()
diff --git a/Modules/IO/TestKernel/include/otbReadDataFile.h b/Modules/IO/TestKernel/include/otbReadDataFile.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e7174be6ec50038bb217f4248cbd5be604d6566
--- /dev/null
+++ b/Modules/IO/TestKernel/include/otbReadDataFile.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "otbStringUtils.h"
+#include "otb_boost_string_header.h"
+
+#include "itkListSample.h"
+#include <fstream>
+#include <string>
+#include <algorithm>
+
+namespace otb
+{
+/** Utility function to read the data file letter.scale, a CSV type file
+ *  (whitespace separators) with the letter index in first column, followed by
+ *  16 descriptors. Each descriptor is a pair 'index:value'
+ */
+template <typename TInput, typename TTarget>
+bool ReadDataFile(
+  const std::string & infname,
+  itk::SmartPointer<itk::Statistics::ListSample<TInput> > samples,
+  itk::SmartPointer<itk::Statistics::ListSample<TTarget> > labels)
+{
+  typedef typename itk::Statistics::ListSample<TInput>::MeasurementType IValueType;
+  typedef typename itk::Statistics::ListSample<TTarget>::MeasurementType TValueType;
+
+  std::ifstream ifs;
+  ifs.open(infname.c_str());
+
+  if(!ifs)
+    {
+    std::cout<<"Could not read file "<<infname<<std::endl;
+    return false;
+    }
+
+  labels->SetMeasurementVectorSize(1);
+  // Number of descriptors per sample, deduced from the first non-empty line.
+  // NOTE(review): assumes single-space separators; consecutive spaces would
+  // overcount — confirm against the data files used.
+  unsigned int nbfeatures = 0;
+
+  while (!ifs.eof())
+    {
+    std::string line;
+    std::getline(ifs, line);
+    boost::algorithm::trim(line);
+
+    if(nbfeatures == 0)
+      {
+      nbfeatures = std::count(line.begin(),line.end(),' ');
+      samples->SetMeasurementVectorSize(nbfeatures);
+      }
+
+    if(line.size()>1)
+      {
+      TInput sample;
+      itk::NumericTraits<TInput>::SetLength(sample, nbfeatures);
+
+      std::string::size_type pos = line.find_first_of(" ", 0);
+
+      // Parse label (first whitespace-separated token)
+      TTarget label;
+      itk::NumericTraits<TTarget>::SetLength(label,1);
+      label[0] = boost::lexical_cast<TValueType>(line.substr(0, pos));
+
+      bool endOfLine = false;
+      unsigned int id = 0;
+
+      while(!endOfLine)
+        {
+        std::string::size_type nextpos = line.find_first_of(" ", pos+1);
+        if(nextpos == std::string::npos)
+          {
+          endOfLine = true;
+          nextpos = line.size();
+          }
+
+        // Look for an 'index:value' separator inside the current token only.
+        // The previous call find_first_of(":", pos+1, nextpos-pos-1) used the
+        // (const char*, pos, n) overload, where n is the length of the
+        // character-set string, not a search bound: it read past the ":"
+        // literal (undefined behavior) and never limited the search to the
+        // token. Search unbounded instead and reject matches beyond nextpos.
+        std::string::size_type semicolonpos = line.find_first_of(':', pos+1);
+        if (semicolonpos == std::string::npos || semicolonpos >= nextpos)
+          {
+          // Plain 'value' token: store at the next implicit descriptor index.
+          id++;
+          sample[id - 1] = boost::lexical_cast<IValueType>(line.substr(pos+1,nextpos-pos-1));
+          }
+        else
+          {
+          // 'index:value' token: the descriptor index is explicit (1-based).
+          id = boost::lexical_cast<unsigned int>(line.substr(pos+1,semicolonpos-pos-1));
+          sample[id - 1] = boost::lexical_cast<IValueType>(
+            line.substr(semicolonpos+1,nextpos-semicolonpos-1));
+          }
+        pos = nextpos;
+
+        }
+      samples->PushBack(sample);
+      labels->PushBack(label);
+      }
+    }
+
+  ifs.close();
+  return true;
+}
+
+} // end of namespace otb
diff --git a/Modules/Learning/DimensionalityReductionLearning/CMakeLists.txt b/Modules/Learning/DimensionalityReductionLearning/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..79b6f276d592e5f9f1c41c03ff95589982b8cb76
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/CMakeLists.txt
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+#
+# This file is part of Orfeo Toolbox
+#
+#     https://www.orfeo-toolbox.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# The project name must be the module name used elsewhere in this patch
+# ("OTBDimensionalityReductionLearning", see otb-module.cmake DEPENDS);
+# "OTBDimensionalityReduction" is already taken by the existing filter module
+# and would collide.
+project(OTBDimensionalityReductionLearning)
+
+otb_module_impl()
diff --git a/Modules/Learning/DimensionalityReductionLearning/README.md b/Modules/Learning/DimensionalityReductionLearning/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a46b8fbf8af741f44690a747f973b08a5114e2d0
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/README.md
@@ -0,0 +1,25 @@
+This module contains a new dimensionality reduction framework for the Orfeo Toolbox.
+
+The framework is based on machine learning models: a dimensionality reduction algorithm 
+can be trained and applied through the model class, in the same fashion as the machine learning models
+used for classification (supervised or unsupervised) and regression.
+
+The algorithms featured in the module are (27/06/2017) :
+ - autoencoders and multi layer autoencoders, with several regularization options
+ - PCA 
+ - SOM 
+ 
+ Autoencoders and PCA models are using Shark ML library, while SOM model is based on the SOM classes of the OTB.
+ 
+ More specifically, the module contains :
+ 
+ - Autoencoder models, PCA models and SOM models, with methods for training, serialization and prediction (i.e. reduction)
+ - A Dimensionality Reduction Model Factory and a factory for each model, which allow the user to create objects of the model classes
+ - A (OTB ImageToImage) filter that can be used to perform dimensionality reduction on an image. This filter supports threading and streaming
+ - An application for training the models according to a shapefile
+ - An application for using a trained model on a shapefile
+ - An application for using a trained model on an image (using the filter)
+ 
+			/!\ Work In Progress /!\
+ 
+
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
new file mode 100644
index 0000000000000000000000000000000000000000..14205f0cd81aa2053d22b7bf90bfc78b5bebfe39
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbAutoencoderModel_h
+#define otbAutoencoderModel_h
+
+#include "otbMachineLearningModelTraits.h"
+#include "otbMachineLearningModel.h"
+#include <fstream>
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#endif
+#include "otb_shark.h"
+#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
+#include <shark/Models/FFNet.h>
+#include <shark/Models/Autoencoder.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
+namespace otb
+{
+/**
+ * \class AutoencoderModel
+ *
+ * Autoencoder model wrapper class
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+// Stacked (possibly denoising/sparse) autoencoder. Each hidden layer is an
+// itk::Array entry; training builds a symmetric encoder/decoder FFNet and
+// prediction returns the innermost (feature-layer) activation.
+template <class TInputValue, class NeuronType>
+class ITK_EXPORT AutoencoderModel
+  : public  MachineLearningModel<
+    itk::VariableLengthVector< TInputValue>,
+    itk::VariableLengthVector< TInputValue> >
+{
+public:
+  typedef AutoencoderModel Self;
+  typedef MachineLearningModel<
+    itk::VariableLengthVector< TInputValue>,
+    itk::VariableLengthVector< TInputValue> > Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  typedef typename Superclass::InputValueType       InputValueType;
+  typedef typename Superclass::InputSampleType      InputSampleType;
+  typedef typename Superclass::InputListSampleType    InputListSampleType;
+  typedef typename InputListSampleType::Pointer       ListSamplePointerType;
+  typedef typename Superclass::TargetValueType      TargetValueType;
+  typedef typename Superclass::TargetSampleType       TargetSampleType;
+  typedef typename Superclass::TargetListSampleType     TargetListSampleType;
+
+  /// Confidence map related typedefs
+  typedef typename Superclass::ConfidenceValueType          ConfidenceValueType;
+  typedef typename Superclass::ConfidenceSampleType         ConfidenceSampleType;
+  typedef typename Superclass::ConfidenceListSampleType         ConfidenceListSampleType;
+
+  /// Neural network related typedefs
+  typedef shark::Autoencoder<NeuronType,shark::LinearNeuron> OutAutoencoderType;
+  typedef shark::Autoencoder<NeuronType,NeuronType> AutoencoderType;
+  typedef shark::FFNet<NeuronType,shark::LinearNeuron> NetworkType;
+
+  itkNewMacro(Self);
+  // NOTE(review): the RTTI superclass string is "DimensionalityReductionModel"
+  // while the C++ base is MachineLearningModel — confirm which name is intended.
+  itkTypeMacro(AutoencoderModel, DimensionalityReductionModel);
+
+  itkGetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
+  itkSetMacro(NumberOfHiddenNeurons,itk::Array<unsigned int>);
+
+  itkGetMacro(NumberOfIterations,unsigned int);
+  itkSetMacro(NumberOfIterations,unsigned int);
+
+  itkGetMacro(NumberOfIterationsFineTuning,unsigned int);
+  itkSetMacro(NumberOfIterationsFineTuning,unsigned int);
+
+  itkGetMacro(Epsilon,double);
+  itkSetMacro(Epsilon,double);
+
+  itkGetMacro(InitFactor,double);
+  itkSetMacro(InitFactor,double);
+
+  itkGetMacro(Regularization,itk::Array<double>);
+  itkSetMacro(Regularization,itk::Array<double>);
+
+  itkGetMacro(Noise,itk::Array<double>);
+  itkSetMacro(Noise,itk::Array<double>);
+
+  itkGetMacro(Rho,itk::Array<double>);
+  itkSetMacro(Rho,itk::Array<double>);
+
+  itkGetMacro(Beta,itk::Array<double>);
+  itkSetMacro(Beta,itk::Array<double>);
+
+  itkGetMacro(WriteLearningCurve,bool);
+  itkSetMacro(WriteLearningCurve,bool);
+
+  itkSetMacro(WriteWeights, bool);
+  itkGetMacro(WriteWeights, bool);
+
+  itkGetMacro(LearningCurveFileName,std::string);
+  itkSetMacro(LearningCurveFileName,std::string);
+
+  /** File-based persistence. CanReadFile attempts a full Load() internally. */
+  bool CanReadFile(const std::string & filename);
+  bool CanWriteFile(const std::string & filename);
+
+  void Save(const std::string & filename, const std::string & name="")  ITK_OVERRIDE;
+  void Load(const std::string & filename, const std::string & name="")  ITK_OVERRIDE;
+
+  void Train() ITK_OVERRIDE;
+
+  /** Train one denoising autoencoder layer with the given stopping criterion. */
+  template <class T, class Autoencoder>
+  void TrainOneLayer(
+    shark::AbstractStoppingCriterion<T> & criterion,
+    Autoencoder &,
+    unsigned int,
+    shark::Data<shark::RealVector> &,
+    std::ostream&);
+
+  /** Train one sparse autoencoder layer with the given stopping criterion. */
+  template <class T, class Autoencoder>
+  void TrainOneSparseLayer(
+    shark::AbstractStoppingCriterion<T> & criterion,
+    Autoencoder &,
+    unsigned int,
+    shark::Data<shark::RealVector> &,
+    std::ostream&);
+
+  /** Fine-tune the whole encoder/decoder network. */
+  template <class T>
+  void TrainNetwork(
+    shark::AbstractStoppingCriterion<T> & criterion,
+    shark::Data<shark::RealVector> &,
+    std::ostream&);
+
+protected:
+  AutoencoderModel();
+  ~AutoencoderModel() ITK_OVERRIDE;
+
+  /** Encode one sample / a batch of samples (the "prediction" of this model). */
+  virtual TargetSampleType DoPredict(
+    const InputSampleType& input,
+    ConfidenceValueType * quality = ITK_NULLPTR) const;
+
+  virtual void DoPredictBatch(
+    const InputListSampleType *,
+    const unsigned int & startIndex,
+    const unsigned int & size,
+    TargetListSampleType *,
+    ConfidenceListSampleType * quality = ITK_NULLPTR) const;
+
+private:
+  /** Internal Network */
+  NetworkType m_Net;
+  itk::Array<unsigned int> m_NumberOfHiddenNeurons;
+  /** Training parameters */
+  unsigned int m_NumberOfIterations; // stop the training after a fixed number of iterations
+  unsigned int m_NumberOfIterationsFineTuning; // stop the fine tuning after a fixed number of iterations
+  double m_Epsilon; // Stops the training when the training error seems to converge
+  itk::Array<double> m_Regularization;  // L2 Regularization parameter
+  itk::Array<double> m_Noise;  // probability for an input to be set to 0 (denoising autoencoder)
+  itk::Array<double> m_Rho; // Sparsity parameter
+  itk::Array<double> m_Beta; // Sparsity regularization parameter
+  double m_InitFactor; // Weight initialization factor (the weights are initialized at m_InitFactor/sqrt(inputDimension))
+
+  bool m_WriteLearningCurve; // Flag for writing the learning curve into a txt file
+  std::string m_LearningCurveFileName; // Name of the output learning curve printed after training
+  bool m_WriteWeights;
+};
+} // end namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbAutoencoderModel.txx"
+#endif
+
+#endif
+
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
new file mode 100644
index 0000000000000000000000000000000000000000..33f1c28e247c43f80ac28a1d608b1c15967c6a5e
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbAutoencoderModel_txx
+#define otbAutoencoderModel_txx
+
+#include "otbAutoencoderModel.h"
+#include "otbMacro.h"
+
+#include <fstream>
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otbSharkUtils.h"
+//include train function
+#include <shark/ObjectiveFunctions/ErrorFunction.h>
+#include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
+
+#include <shark/Algorithms/GradientDescent/Rprop.h>// the RProp optimization algorithm
+#include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
+#include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
+#include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
+#include <shark/Models/ConcatenatedModel.h>//to concatenate the noise with the model
+
+#include <shark/Algorithms/StoppingCriteria/MaxIterations.h> //A simple stopping criterion that stops after a fixed number of iterations
+#include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
+
+#include <shark/Algorithms/GradientDescent/SteepestDescent.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
+namespace otb
+{
+
+// Constructor: batch prediction is declared thread-safe and the
+// learning-curve dump is disabled by default.
+// NOTE(review): scalar members (m_NumberOfIterations, m_Epsilon, m_InitFactor,
+// m_WriteWeights, ...) are not initialized here — confirm callers always set
+// them before Train()/Save().
+template <class TInputValue, class NeuronType>
+AutoencoderModel<TInputValue,NeuronType>::AutoencoderModel()
+{
+  this->m_IsDoPredictBatchMultiThreaded = true;
+  this->m_WriteLearningCurve = false;
+}
+
+// Default destructor: all members manage their own storage.
+template <class TInputValue, class NeuronType>
+AutoencoderModel<TInputValue,NeuronType>::~AutoencoderModel()
+{
+}
+
+// Greedy layer-wise training of the stacked autoencoder, followed by an
+// optional fine tuning of the complete encoder/decoder network.
+template <class TInputValue, class NeuronType>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::Train()
+{
+  std::vector<shark::RealVector> features;
+  Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+  shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
+  // Untouched copy: 'inputSamples' is re-encoded in place after each layer,
+  // while the final fine tuning needs the raw inputs.
+  shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
+
+  std::ofstream ofs;
+  if (this->m_WriteLearningCurve == true)
+    {
+    ofs.open(m_LearningCurveFileName);
+    ofs << "learning curve" << std::endl;
+    }
+
+  // Initialization of the feed forward neural network.
+  // Layer sizes are mirrored around the innermost feature layer:
+  // in, h0..hN, hN-1..h0, in (encoder then decoder).
+  std::vector<size_t> layers;
+  layers.push_back(shark::dataDimension(inputSamples));
+  for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+    {
+    layers.push_back(m_NumberOfHiddenNeurons[i]);
+    }
+
+  for (unsigned int i = std::max(0,static_cast<int>(m_NumberOfHiddenNeurons.Size()-1)) ; i > 0; --i)
+    {
+    layers.push_back(m_NumberOfHiddenNeurons[i-1]);
+    }
+
+  layers.push_back(shark::dataDimension(inputSamples));
+  m_Net.setStructure(layers);
+  shark::initRandomNormal(m_Net,0.1);
+
+  // Training of the first Autoencoder (first and last layer of the FF network).
+  // m_Epsilon > 0 selects a convergence-based stopping criterion
+  // (TrainingProgress), otherwise a fixed iteration count (MaxIterations).
+  // NOTE(review): the two branches below differ only in the criterion type —
+  // candidates for factoring through AbstractStoppingCriterion.
+  if (m_Epsilon > 0)
+    {
+    shark::TrainingProgress<> criterion(5,m_Epsilon);
+
+    OutAutoencoderType net;
+    // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
+    if (m_Noise[0] != 0)
+      {
+      TrainOneLayer(criterion, net, 0, inputSamples, ofs);
+      }
+    else
+      {
+      TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
+      }
+    criterion.reset();
+    }
+  else
+    {
+    shark::MaxIterations<> criterion(m_NumberOfIterations);
+
+    OutAutoencoderType net;
+    // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
+    if (m_Noise[0] != 0)
+      {
+      TrainOneLayer(criterion, net, 0, inputSamples, ofs);
+      otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
+      }
+    else
+      {
+      TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
+      }
+    criterion.reset();
+    }
+
+  // Training of the other autoencoders (each consumes the previous layer's
+  // encoded samples).
+  if (m_Epsilon > 0)
+    {
+    shark::TrainingProgress<> criterion(5,m_Epsilon);
+
+    for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+      {
+      AutoencoderType net;
+      // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
+      if (m_Noise[i] != 0)
+        {
+        TrainOneLayer(criterion, net, i, inputSamples, ofs);
+        }
+      else
+        {
+        TrainOneSparseLayer(criterion, net, i, inputSamples, ofs);
+        }
+      criterion.reset();
+      }
+    }
+  else
+    {
+    shark::MaxIterations<> criterion(m_NumberOfIterations);
+
+    for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+      {
+      AutoencoderType net;
+      // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
+      if (m_Noise[i] != 0)
+        {
+        TrainOneLayer(criterion, net, i, inputSamples, ofs);
+        otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
+        }
+      else
+        {
+        TrainOneSparseLayer( criterion, net, i, inputSamples, ofs);
+        }
+      criterion.reset();
+      }
+    }
+  // Optional global fine tuning on the raw (non-encoded) inputs.
+  if (m_NumberOfIterationsFineTuning > 0)
+    {
+    shark::MaxIterations<> criterion(m_NumberOfIterationsFineTuning);
+    TrainNetwork(criterion, inputSamples_copy, ofs);
+    }
+  // Output dimension = size of the innermost (last declared) hidden layer.
+  this->SetDimension(m_NumberOfHiddenNeurons[m_NumberOfHiddenNeurons.Size()-1]);
+}
+
+// Train one denoising autoencoder layer: inputs are corrupted with impulse
+// noise (probability m_Noise[layer_index]) and the layer learns to reconstruct
+// the clean input. The resulting encoder/decoder weights are copied into the
+// symmetric FF network m_Net, and 'samples' is replaced by its encoded
+// representation for the next layer.
+template <class TInputValue, class NeuronType>
+template <class T, class Autoencoder>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::TrainOneLayer(
+  shark::AbstractStoppingCriterion<T> & criterion,
+  Autoencoder & net,
+  unsigned int layer_index,
+  shark::Data<shark::RealVector> &samples,
+  std::ostream& File)
+{
+  otbMsgDevMacro(<< "Noise " <<  m_Noise[layer_index]);
+  std::size_t inputs = dataDimension(samples);
+  net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
+  initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
+
+  shark::ImpulseNoiseModel noise(inputs,m_Noise[layer_index],1.0); //set an input pixel with probability m_Noise to 0
+  shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
+  shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
+  shark::SquaredLoss<shark::RealVector> loss;
+  shark::ErrorFunction error(trainSet, &model, &loss);
+
+  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  error.setRegularizer(m_Regularization[layer_index],&regularizer);
+
+  shark::IRpropPlusFull optimizer;
+  error.init();
+  optimizer.init(error);
+
+  otbMsgDevMacro(<<"Error before training : " << optimizer.solution().value);
+
+  unsigned int i=0;
+  do
+    {
+    i++;
+    optimizer.step(error);
+    if (this->m_WriteLearningCurve == true)
+      {
+      File << optimizer.solution().value << std::endl;
+      }
+    otbMsgDevMacro(<<"Error after " << i << " iterations : " << optimizer.solution().value);
+    } while( !criterion.stop( optimizer.solution() ) );
+  // Write the layer delimiter AFTER the per-iteration values, matching
+  // TrainOneSparseLayer; it was previously emitted before the loop, which
+  // mislabeled the learning-curve file.
+  if (this->m_WriteLearningCurve == true)
+    {
+    File << "end layer" << std::endl;
+    }
+
+  net.setParameterVector(optimizer.solution().point);
+  m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());  // Copy the encoder in the FF neural network
+  m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
+  samples = net.encode(samples);
+}
+
+// Train one sparse autoencoder layer (KL sparsity term with target activation
+// m_Rho and weight m_Beta). The resulting weights are copied into m_Net and
+// 'samples' is replaced by its encoded representation for the next layer.
+template <class TInputValue, class NeuronType>
+template <class T, class Autoencoder>
+void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(
+  shark::AbstractStoppingCriterion<T> & criterion,
+  Autoencoder & net,
+  unsigned int layer_index,
+  shark::Data<shark::RealVector> &samples,
+  std::ostream& File)
+{
+  std::size_t inputs = dataDimension(samples);
+  net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
+
+  shark::initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
+
+  // Idea : set the initials value for the output weights higher than the input weights
+
+  shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
+  shark::SquaredLoss<shark::RealVector> loss;
+  shark::SparseAutoencoderError error(trainSet,&net, &loss, m_Rho[layer_index], m_Beta[layer_index]);
+
+  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  error.setRegularizer(m_Regularization[layer_index],&regularizer);
+  shark::IRpropPlusFull optimizer;
+  error.init();
+  optimizer.init(error);
+
+  otbMsgDevMacro(<<"Error before training : " << optimizer.solution().value);
+  unsigned int i=0;
+  do
+    {
+    i++;
+    optimizer.step(error);
+    otbMsgDevMacro(<<"Error after " << i << " iterations : " << optimizer.solution().value);
+    if (this->m_WriteLearningCurve == true)
+      {
+      File << optimizer.solution().value << std::endl;
+      }
+    } while( !criterion.stop( optimizer.solution() ) );
+  // Layer delimiter for the learning-curve file.
+  if (this->m_WriteLearningCurve == true)
+    {
+    File << "end layer" << std::endl;
+    }
+  net.setParameterVector(optimizer.solution().point);
+  m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());  // Copy the encoder in the FF neural network
+  m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
+  samples = net.encode(samples);
+}
+
+// Fine-tune the complete encoder/decoder network m_Net on the raw inputs.
+// NOTE(review): only the first layer's regularization weight
+// (m_Regularization[0]) is applied to the whole network — confirm intended.
+template <class TInputValue, class NeuronType>
+template <class T>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::TrainNetwork(
+  shark::AbstractStoppingCriterion<T> & criterion,
+  shark::Data<shark::RealVector> &samples,
+  std::ostream& File)
+{
+  //labels identical to inputs
+  shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);
+  shark::SquaredLoss<shark::RealVector> loss;
+
+  shark::ErrorFunction error(trainSet, &m_Net, &loss);
+  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  error.setRegularizer(m_Regularization[0],&regularizer);
+
+  shark::IRpropPlusFull optimizer;
+  error.init();
+  optimizer.init(error);
+  otbMsgDevMacro(<<"Error before training : " << optimizer.solution().value);
+  unsigned int i=0;
+  while( !criterion.stop( optimizer.solution() ) )
+    {
+    i++;
+    optimizer.step(error);
+    otbMsgDevMacro(<<"Error after " << i << " iterations : " << optimizer.solution().value);
+    if (this->m_WriteLearningCurve == true)
+      {
+      File << optimizer.solution().value << std::endl;
+      }
+    }
+}
+
+// Check readability by attempting a full Load(); any exception means the file
+// is not a serialized AutoencoderModel.
+// NOTE(review): on success this leaves the model loaded into m_Net (side
+// effect of a "Can..." query) — confirm callers expect that.
+template <class TInputValue, class NeuronType>
+bool
+AutoencoderModel<TInputValue,NeuronType>
+::CanReadFile(const std::string & filename)
+{
+  try
+    {
+    this->Load(filename);
+    m_Net.name();
+    }
+  catch(...)
+    {
+    return false;
+    }
+  return true;
+}
+
+// Any path is accepted; write failures only surface when Save() opens the file.
+template <class TInputValue, class NeuronType>
+bool
+AutoencoderModel<TInputValue,NeuronType>
+::CanWriteFile(const std::string & /*filename*/)
+{
+  return true;
+}
+
+// Serialize m_Net with shark's text archive. The first line of the file is
+// the network's name, used as a format key by Load(). When m_WriteWeights is
+// set, a human-readable "<filename>.txt" dump of all layer matrices/biases is
+// also written. The 'name' argument is unused.
+template <class TInputValue, class NeuronType>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::Save(const std::string & filename, const std::string & /*name*/)
+{
+  otbMsgDevMacro(<< "saving model ...");
+  std::ofstream ofs(filename);
+  ofs << m_Net.name() << std::endl; // the first line of the model file contains a key
+  shark::TextOutArchive oa(ofs);
+  oa << m_Net;
+  ofs.close();
+
+  if (this->m_WriteWeights == true)     // output the map vectors in a txt file
+    {
+    std::ofstream otxt(filename+".txt");
+    for (unsigned int i = 0 ; i < m_Net.layerMatrices().size(); ++i)
+      {
+      otxt << "layer " << i << std::endl;
+      otxt << m_Net.layerMatrix(i) << std::endl;
+      otxt << m_Net.bias(i) << std::endl;
+      otxt << std::endl;
+      }
+    }
+}
+
+// Deserialize m_Net from 'filename'. The first line of the file must match
+// the network's name key written by Save(); otherwise an exception is thrown.
+// The 'name' argument is unused.
+template <class TInputValue, class NeuronType>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::Load(const std::string & filename, const std::string & /*name*/)
+{
+  // Fresh default network used only to obtain the expected name key.
+  NetworkType net;
+  std::ifstream ifs(filename);
+  char autoencoder[256];
+  ifs.getline(autoencoder,256);
+  std::string autoencoderstr(autoencoder);
+
+  if (autoencoderstr != net.name()){
+    itkExceptionMacro(<< "Error opening " << filename.c_str() );
+    }
+  shark::TextInArchive ia(ifs);
+  ia >> m_Net;
+  ifs.close();
+
+  // This gives us the dimension if we keep the encoder and decoder
+  size_t feature_layer_index = m_Net.layerMatrices().size()/2;
+  // number of neurons in the feature layer (second dimension of the first decoder weight matrix)
+  this->SetDimension(m_Net.layerMatrix(feature_layer_index).size2());
+}
+
+// Encode a single sample: wrap it in a one-element shark Data set, evaluate
+// the network up to the innermost (feature) layer and copy the activation
+// into a TargetSampleType of size m_Dimension. 'quality' is unused.
+template <class TInputValue, class NeuronType>
+typename AutoencoderModel<TInputValue,NeuronType>::TargetSampleType
+AutoencoderModel<TInputValue,NeuronType>
+::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
+{
+  shark::RealVector samples(value.Size());
+  for(size_t i = 0; i < value.Size();i++)
+    {
+    samples[i]=value[i];
+    }
+
+  std::vector<shark::RealVector> features;
+  features.push_back(samples);
+
+  shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
+
+  // features layer for a network containing the encoder and decoder part
+  data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
+  TargetSampleType target;
+  target.SetSize(this->m_Dimension);
+
+  for(unsigned int a = 0; a < this->m_Dimension; ++a)
+    {
+    target[a]=data.element(0)[a];
+    }
+  return target;
+}
+
+// Encode the samples [startIndex, startIndex+size) in one shark evaluation
+// and write the feature-layer activations back into 'targets' at the same
+// indices. 'quality' is unused.
+template <class TInputValue, class NeuronType>
+void
+AutoencoderModel<TInputValue,NeuronType>
+::DoPredictBatch(
+  const InputListSampleType *input,
+  const unsigned int & startIndex,
+  const unsigned int & size,
+  TargetListSampleType * targets,
+  ConfidenceListSampleType * /*quality*/) const
+{
+  std::vector<shark::RealVector> features;
+  Shark::ListSampleRangeToSharkVector(input, features,startIndex,size);
+  shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
+  TargetSampleType target;
+  // features layer for a network containing the encoder and decoder part
+  data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
+
+  unsigned int id = startIndex;
+  target.SetSize(this->m_Dimension);
+
+  // Reuse one target buffer; SetMeasurementVector copies it into the list.
+  for(const auto& p : data.elements())
+    {
+    for(unsigned int a = 0; a < this->m_Dimension; ++a)
+      {
+      target[a]=p[a];
+      }
+    targets->SetMeasurementVector(id,target);
+    ++id;
+    }
+}
+
+} // namespace otb
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
new file mode 100644
index 0000000000000000000000000000000000000000..711711ca4b5afd0a9d1864cc7b300dd56d40db64
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbAutoencoderModelFactory_h
+#define otbAutoencoderModelFactory_h
+
+#include "itkObjectFactoryBase.h"
+#include "itkImageIOBase.h"
+
+namespace otb
+{
+
+/**
+ * \class AutoencoderModelFactory
+ *
+ * Object factory producing AutoencoderModel instances through the ITK
+ * object-factory mechanism.
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+template <class TInputValue, class TTargetValue, class NeuronType>
+class ITK_EXPORT AutoencoderModelFactory : public itk::ObjectFactoryBase
+{
+public:
+  /** Standard class typedefs. */
+  typedef AutoencoderModelFactory   Self;
+  typedef itk::ObjectFactoryBase        Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Class methods used to interface with the registered factories. */
+  const char* GetITKSourceVersion(void) const ITK_OVERRIDE;
+  const char* GetDescription(void) const ITK_OVERRIDE;
+
+  /** Method for class instantiation. */
+  itkFactorylessNewMacro(Self);
+
+  /** Run-time type information (and related methods). */
+  itkTypeMacro(AutoencoderModelFactory, itk::ObjectFactoryBase);
+
+  /** Register one factory of this type  */
+  static void RegisterOneFactory(void)
+  {
+    // Create a factory instance and add it to the global ITK registry so
+    // that CreateAllInstance() can discover the autoencoder model.
+    Pointer AEFactory = AutoencoderModelFactory::New();
+    itk::ObjectFactoryBase::RegisterFactory(AEFactory);
+  }
+
+protected:
+  AutoencoderModelFactory();
+  ~AutoencoderModelFactory() ITK_OVERRIDE;
+
+private:
+  AutoencoderModelFactory(const Self &); //purposely not implemented
+  void operator =(const Self&); //purposely not implemented
+};
+
+} //namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbAutoencoderModelFactory.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
new file mode 100644
index 0000000000000000000000000000000000000000..c55f718a622ccd6ecc9632bd8cac7b9aa38c78bf
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModelFactory.txx
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbAutoencoderModelFactory_txx
+#define otbAutoencoderModelFactory_txx
+
+#include "otbAutoencoderModelFactory.h"
+#include "otbAutoencoderModel.h"
+
+#include "itkCreateObjectFunction.h"
+#include "itkVersion.h"
+
+namespace otb
+{
+template <class TInputValue, class TOutputValue, class NeuronType>
+AutoencoderModelFactory<TInputValue,TOutputValue, NeuronType>::AutoencoderModelFactory()
+{
+  // Register AutoencoderModel as a concrete provider for the abstract
+  // "DimensionalityReductionModel" class name.
+  // NOTE(review): TOutputValue is not forwarded to AutoencoderModel — the
+  // model is instantiated from TInputValue and NeuronType only; confirm
+  // this is intended.
+  std::string classOverride = std::string("DimensionalityReductionModel");
+  std::string subclass = std::string("AutoencoderModel");
+
+  this->RegisterOverride(
+    classOverride.c_str(),
+    subclass.c_str(),
+    "Shark AE ML Model",
+    1,
+    itk::CreateObjectFunction<AutoencoderModel<TInputValue,NeuronType > >::New());
+}
+
+template <class TInputValue, class TOutputValue, class NeuronType>
+AutoencoderModelFactory<TInputValue,TOutputValue, NeuronType>::~AutoencoderModelFactory()
+{
+  // Intentionally empty: the factory owns no resources of its own.
+}
+
+template <class TInputValue, class TOutputValue, class NeuronType>
+const char*
+AutoencoderModelFactory<TInputValue,TOutputValue, NeuronType>::GetITKSourceVersion(void) const
+{
+  // Report the ITK version this factory was compiled against.
+  return ITK_SOURCE_VERSION;
+}
+
+template <class TInputValue, class TOutputValue, class NeuronType>
+const char*
+AutoencoderModelFactory<TInputValue,TOutputValue, NeuronType>::GetDescription() const
+{
+  // Human-readable description exposed through the object-factory API.
+  return "Autoencoder model factory";
+}
+
+} // end namespace otb
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.h
new file mode 100644
index 0000000000000000000000000000000000000000..9bbd28bc0b9ebbe7561bb19ca5f6df5b398a082b
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbDimensionalityReductionModelFactory_h
+#define otbDimensionalityReductionModelFactory_h
+
+#include "otbMachineLearningModelFactoryBase.h"
+#include "otbMachineLearningModel.h" 
+
+namespace otb
+{
+/** \class DimensionalityReductionModelFactory
+ * \brief Creation of dimensionality reduction model instances using the object factory.
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+template <class TInputValue, class TOutputValue>
+class DimensionalityReductionModelFactory : public MachineLearningModelFactoryBase
+{
+public:
+  /** Standard class typedefs. */
+  typedef DimensionalityReductionModelFactory                Self;
+  // NOTE(review): Superclass is declared as itk::Object although the class
+  // derives from MachineLearningModelFactoryBase — confirm this is intended.
+  typedef itk::Object           Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Class Methods used to interface with the registered factories */
+
+  /** Run-time type information (and related methods). */
+  itkTypeMacro(DimensionalityReductionModelFactory, itk::Object);
+
+  /** Convenient typedefs. */
+  typedef otb::MachineLearningModel<
+    itk::VariableLengthVector< TInputValue >,
+    itk::VariableLengthVector< TOutputValue> >  DimensionalityReductionModelType;
+  typedef typename DimensionalityReductionModelType::Pointer DimensionalityReductionModelTypePointer;
+
+  /** Mode in which the file is intended to be used */
+  typedef enum { ReadMode, WriteMode } FileModeType;
+
+  /** Create the appropriate MachineLearningModel depending on the particulars of the file. */
+  static DimensionalityReductionModelTypePointer CreateDimensionalityReductionModel(const std::string& path, FileModeType mode);
+
+  /** Unregister the factories registered by RegisterBuiltInFactories(). */
+  static void CleanFactories();
+
+protected:
+  DimensionalityReductionModelFactory();
+  ~DimensionalityReductionModelFactory() ITK_OVERRIDE;
+
+private:
+  DimensionalityReductionModelFactory(const Self &); //purposely not implemented
+  void operator =(const Self&); //purposely not implemented
+
+  /** Register Built-in factories */
+  static void RegisterBuiltInFactories();
+
+  /** Register a single factory, ensuring it has not been registered
+    * twice */
+  static void RegisterFactory(itk::ObjectFactoryBase * factory);
+};
+
+} // end namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbDimensionalityReductionModelFactory.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.txx
new file mode 100644
index 0000000000000000000000000000000000000000..451fce5b7c7b8ff866d3a921a883682c4e3426a8
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbDimensionalityReductionModelFactory.txx
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbDimensionalityReductionModelFactory_txx
+#define otbDimensionalityReductionModelFactory_txx
+
+#include "otbDimensionalityReductionModelFactory.h"
+#include "otbConfigure.h"
+
+#include "otbSOMModelFactory.h"
+
+#ifdef OTB_USE_SHARK
+#include "otbAutoencoderModelFactory.h"
+#include "otbPCAModelFactory.h"
+#endif
+
+#include "itkMutexLockHolder.h"
+
+namespace otb
+{
+
+template <class TInputValue, class TTargetValue>
+using LogAutoencoderModelFactory = AutoencoderModelFactory<TInputValue, TTargetValue, shark::LogisticNeuron>  ;
+
+template <class TInputValue, class TTargetValue>
+using SOM2DModelFactory = SOMModelFactory<TInputValue, TTargetValue, 2>  ;
+
+template <class TInputValue, class TTargetValue>
+using SOM3DModelFactory = SOMModelFactory<TInputValue, TTargetValue, 3>  ;
+
+template <class TInputValue, class TTargetValue>
+using SOM4DModelFactory = SOMModelFactory<TInputValue, TTargetValue, 4>  ;
+
+template <class TInputValue, class TTargetValue>
+using SOM5DModelFactory = SOMModelFactory<TInputValue, TTargetValue, 5>  ;
+
+
+template <class TInputValue, class TOutputValue>
+typename DimensionalityReductionModelFactory<TInputValue,TOutputValue>::DimensionalityReductionModelTypePointer
+DimensionalityReductionModelFactory<TInputValue,TOutputValue>
+::CreateDimensionalityReductionModel(const std::string& path, FileModeType mode)
+{
+  // Make sure every built-in model factory is registered before querying.
+  RegisterBuiltInFactories();
+
+  // Instantiate every object registered under "DimensionalityReductionModel"
+  // and keep only those that really are models of the expected type.
+  // NOTE(review): LightObject is used unqualified — relies on itk namespace
+  // visibility from an included header; confirm it resolves to itk::LightObject.
+  std::list<DimensionalityReductionModelTypePointer> possibleDimensionalityReductionModel;
+  std::list<LightObject::Pointer> allobjects =
+    itk::ObjectFactoryBase::CreateAllInstance("DimensionalityReductionModel");
+
+  for(std::list<LightObject::Pointer>::iterator i = allobjects.begin();
+      i != allobjects.end(); ++i)
+    {
+    DimensionalityReductionModelType* io =
+      dynamic_cast<DimensionalityReductionModelType*>(i->GetPointer());
+    if(io)
+      {
+      possibleDimensionalityReductionModel.push_back(io);
+      }
+    else
+      {
+      std::cerr << "Error DimensionalityReductionModel Factory did not return an DimensionalityReductionModel: "
+                << (*i)->GetNameOfClass()
+                << std::endl;
+      }
+    }
+
+  // Return the first candidate able to handle the file in the requested mode.
+  for(typename std::list<DimensionalityReductionModelTypePointer>::iterator k = possibleDimensionalityReductionModel.begin();
+      k != possibleDimensionalityReductionModel.end(); ++k)
+    {
+    if( mode == ReadMode )
+      {
+      if((*k)->CanReadFile(path))
+        {
+        return *k;
+        }
+      }
+    else if( mode == WriteMode )
+      {
+      if((*k)->CanWriteFile(path))
+        {
+        return *k;
+        }
+      }
+    }
+  // No registered model can handle this file.
+  return ITK_NULLPTR;
+}
+
+template <class TInputValue, class TOutputValue>
+void
+DimensionalityReductionModelFactory<TInputValue,TOutputValue>
+::RegisterBuiltInFactories()
+{
+  // Serialize registration: the ITK factory registry is global state.
+  itk::MutexLockHolder<itk::SimpleMutexLock> lockHolder(mutex);
+
+  RegisterFactory(SOM2DModelFactory<TInputValue,TOutputValue>::New());
+  RegisterFactory(SOM3DModelFactory<TInputValue,TOutputValue>::New());
+  RegisterFactory(SOM4DModelFactory<TInputValue,TOutputValue>::New());
+  RegisterFactory(SOM5DModelFactory<TInputValue,TOutputValue>::New());
+
+#ifdef OTB_USE_SHARK
+  // Shark-based models are only available when OTB is built with Shark.
+  RegisterFactory(PCAModelFactory<TInputValue,TOutputValue>::New());
+  RegisterFactory(LogAutoencoderModelFactory<TInputValue,TOutputValue>::New());
+#endif
+}
+
+template <class TInputValue, class TOutputValue>
+void
+DimensionalityReductionModelFactory<TInputValue,TOutputValue>
+::RegisterFactory(itk::ObjectFactoryBase * factory)
+{
+  // Unregister any previously registered factory of the same class before
+  // registering, so repeated calls never accumulate duplicates.
+  // Might be more intensive but a static bool guard is not an option due to
+  // an ld error.
+  itk::ObjectFactoryBase::UnRegisterFactory(factory);
+  itk::ObjectFactoryBase::RegisterFactory(factory);
+}
+
+template <class TInputValue, class TOutputValue>
+void
+DimensionalityReductionModelFactory<TInputValue,TOutputValue>
+::CleanFactories()
+{
+  // Serialize access to the global ITK factory registry.
+  itk::MutexLockHolder<itk::SimpleMutexLock> lockHolder(mutex);
+
+  // Walk every registered factory and unregister the factory types added by
+  // RegisterBuiltInFactories().
+  std::list<itk::ObjectFactoryBase*> factories = itk::ObjectFactoryBase::GetRegisteredFactories();
+  std::list<itk::ObjectFactoryBase*>::iterator itFac;
+
+  for (itFac = factories.begin(); itFac != factories.end() ; ++itFac)
+    {
+    // SOM 5D
+    SOM5DModelFactory<TInputValue,TOutputValue> *som5dFactory =
+      dynamic_cast<SOM5DModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (som5dFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(som5dFactory);
+      continue;
+      }
+    // SOM 4D
+    SOM4DModelFactory<TInputValue,TOutputValue> *som4dFactory =
+      dynamic_cast<SOM4DModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (som4dFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(som4dFactory);
+      continue;
+      }
+    // SOM 3D
+    SOM3DModelFactory<TInputValue,TOutputValue> *som3dFactory =
+      dynamic_cast<SOM3DModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (som3dFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(som3dFactory);
+      continue;
+      }
+    // SOM 2D
+    SOM2DModelFactory<TInputValue,TOutputValue> *som2dFactory =
+      dynamic_cast<SOM2DModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (som2dFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(som2dFactory);
+      continue;
+      }
+#ifdef OTB_USE_SHARK
+    // Autoencoder
+    LogAutoencoderModelFactory<TInputValue,TOutputValue> *aeFactory =
+      dynamic_cast<LogAutoencoderModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (aeFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(aeFactory);
+      continue;
+      }
+    // PCA
+    PCAModelFactory<TInputValue,TOutputValue> *pcaFactory =
+      dynamic_cast<PCAModelFactory<TInputValue,TOutputValue> *>(*itFac);
+    if (pcaFactory)
+      {
+      itk::ObjectFactoryBase::UnRegisterFactory(pcaFactory);
+      continue;
+      }
+#endif
+    }
+}
+
+} // end namespace otb
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.h b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.h
new file mode 100644
index 0000000000000000000000000000000000000000..d5538c1700538d6e77adb78c0358108e9730ce73
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbImageDimensionalityReduction_h
+#define otbImageDimensionalityReduction_h
+
+#include "itkImageToImageFilter.h"
+#include "otbMachineLearningModel.h"
+#include "otbImage.h"
+
+namespace otb
+{
+/** \class ImageDimensionalityReductionFilter
+ *  \brief This filter performs dimensionality reduction of a VectorImage using a Model.
+ *
+ *  This filter is streamed and threaded, allowing to process huge images
+ *  while fully using several cores.
+ *
+ * \sa MachineLearningModel
+ * \ingroup Streamed
+ * \ingroup Threaded
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+template <class TInputImage, class TOutputImage, class TMaskImage = TOutputImage>
+class ITK_EXPORT ImageDimensionalityReductionFilter
+  : public itk::ImageToImageFilter<TInputImage, TOutputImage>
+{
+public:
+  /** Standard typedefs */
+  typedef ImageDimensionalityReductionFilter                 Self;
+  typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
+  typedef itk::SmartPointer<Self>                            Pointer;
+  typedef itk::SmartPointer<const Self>                      ConstPointer;
+
+  /** Type macro */
+  itkNewMacro(Self);
+
+  /** Creation through object factory macro */
+  itkTypeMacro(ImageDimensionalityReductionFilter, ImageToImageFilter);
+
+  typedef TInputImage                                InputImageType;
+  typedef typename InputImageType::ConstPointer      InputImageConstPointerType;
+  typedef typename InputImageType::InternalPixelType ValueType;
+
+  typedef TMaskImage                           MaskImageType;
+  typedef typename MaskImageType::ConstPointer MaskImageConstPointerType;
+  typedef typename MaskImageType::Pointer      MaskImagePointerType;
+
+  typedef TOutputImage                         OutputImageType;
+  typedef typename OutputImageType::Pointer    OutputImagePointerType;
+  typedef typename OutputImageType::RegionType OutputImageRegionType;
+  typedef typename OutputImageType::InternalPixelType  LabelType;
+
+  /** Model mapping an input pixel to a reduced VariableLengthVector */
+  typedef MachineLearningModel<itk::VariableLengthVector<ValueType>, itk::VariableLengthVector<LabelType>> ModelType;
+  typedef typename ModelType::Pointer                ModelPointerType;
+
+  typedef otb::Image<double>                    ConfidenceImageType;
+  typedef typename ConfidenceImageType::Pointer ConfidenceImagePointerType;
+
+  /** Set/Get the model */
+  itkSetObjectMacro(Model, ModelType);
+  itkGetObjectMacro(Model, ModelType);
+
+  /** Set/Get the default label */
+  itkSetMacro(DefaultLabel, LabelType);
+  itkGetMacro(DefaultLabel, LabelType);
+
+  /** Set/Get the confidence map flag */
+  itkSetMacro(UseConfidenceMap, bool);
+  itkGetMacro(UseConfidenceMap, bool);
+
+  /** Set/Get batch mode: when ON (default, see constructor), prediction is
+   *  done per-region through PredictBatch instead of pixel by pixel. */
+  itkSetMacro(BatchMode, bool);
+  itkGetMacro(BatchMode, bool);
+  itkBooleanMacro(BatchMode);
+
+  /**
+   * If set, only pixels within the mask will be processed.
+   * All pixels with a value greater than 0 in the mask, will be processed.
+   * NOTE(review): the mask is stored but not yet honored by
+   * ThreadedGenerateData — confirm intended behavior.
+   * \param mask The input mask.
+   */
+  void SetInputMask(const MaskImageType * mask);
+
+  /**
+   * Get the input mask.
+   * \return The mask, or ITK_NULLPTR if none was set.
+   */
+  const MaskImageType * GetInputMask(void);
+
+  /**
+   * Get the output confidence map
+   */
+  ConfidenceImageType * GetOutputConfidence(void);
+
+protected:
+  /** Constructor */
+  ImageDimensionalityReductionFilter();
+  /** Destructor */
+  ~ImageDimensionalityReductionFilter() ITK_OVERRIDE {}
+
+  /** Generate output information: requires a model, and sets the output
+   *  pixel size from the model dimension. */
+  void GenerateOutputInformation() ITK_OVERRIDE;
+
+  /** Threaded generate data: dispatches to batch or classic mode */
+  void ThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId) ITK_OVERRIDE;
+  /** Pixel-by-pixel prediction over a region */
+  void ClassicThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId);
+  /** Batch prediction over a whole region */
+  void BatchThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId);
+  /** Before threaded generate data */
+  void BeforeThreadedGenerateData() ITK_OVERRIDE;
+  /** PrintSelf method */
+  void PrintSelf(std::ostream& os, itk::Indent indent) const ITK_OVERRIDE;
+
+private:
+  ImageDimensionalityReductionFilter(const Self &); //purposely not implemented
+  void operator =(const Self&); //purposely not implemented
+
+  /** The model used for dimensionality reduction */
+  ModelPointerType m_Model;
+  /** Default label for invalid pixels (when using a mask) */
+  LabelType m_DefaultLabel;
+  /** Flag to produce the confidence map (if the model supports it) */
+  bool m_UseConfidenceMap;
+  /** Batch mode flag (see SetBatchMode) */
+  bool m_BatchMode;
+};
+} // End namespace otb
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbImageDimensionalityReductionFilter.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
new file mode 100644
index 0000000000000000000000000000000000000000..140a8cd874e3555f73d985e6234c926c93e04993
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbImageDimensionalityReductionFilter.txx
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbImageDimensionalityReductionFilter_txx
+#define otbImageDimensionalityReductionFilter_txx
+
+#include "otbImageDimensionalityReductionFilter.h"
+#include "itkImageRegionIterator.h"
+#include "itkProgressReporter.h"
+
+namespace otb
+{
+/**
+ * Constructor
+ */
+template <class TInputImage, class TOutputImage, class TMaskImage>
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::ImageDimensionalityReductionFilter()
+{
+  // Input 0: image to reduce; input 1 (optional): mask.
+  this->SetNumberOfIndexedInputs(2);
+  this->SetNumberOfRequiredInputs(1);
+
+  // Output 0: reduced image; output 1: confidence map.
+  this->SetNumberOfRequiredOutputs(2);
+  this->SetNthOutput(0,TOutputImage::New());
+  this->SetNthOutput(1,ConfidenceImageType::New());
+  // Value-initialize the default label: it was previously left
+  // uninitialized, making GetDefaultLabel() undefined before SetDefaultLabel().
+  m_DefaultLabel = LabelType();
+  m_UseConfidenceMap = false;
+  m_BatchMode = true;
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::SetInputMask(const MaskImageType * mask)
+{
+  // Store the mask as the second indexed input of the filter.
+  this->itk::ProcessObject::SetNthInput(1, const_cast<MaskImageType *>(mask));
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+const typename ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::MaskImageType *
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::GetInputMask()
+{
+  // No mask set: input 1 does not exist.
+  if (this->GetNumberOfInputs() < 2)
+    {
+    return ITK_NULLPTR;
+    }
+  return static_cast<const MaskImageType *>(this->itk::ProcessObject::GetInput(1));
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+typename ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::ConfidenceImageType *
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::GetOutputConfidence()
+{
+  // The confidence map is the second output created in the constructor.
+  if (this->GetNumberOfOutputs() < 2)
+    {
+    return ITK_NULLPTR;
+    }
+  return static_cast<ConfidenceImageType *>(this->itk::ProcessObject::GetOutput(1));
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::BeforeThreadedGenerateData()
+{
+  // In batch mode, disable ITK's own threading when OpenMP is available.
+  if(m_BatchMode)
+    {
+    #ifdef _OPENMP
+    // OpenMP will take care of threading
+    this->SetNumberOfThreads(1);
+    #endif
+    }
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::ClassicThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId)
+{
+  // Get the input pointers (removed unused mask/confidence locals)
+  InputImageConstPointerType inputPtr  = this->GetInput();
+  OutputImagePointerType     outputPtr = this->GetOutput();
+
+  // Progress reporting
+  itk::ProgressReporter progress(this, threadId, outputRegionForThread.GetNumberOfPixels());
+
+  // Define iterators
+  typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
+  typedef itk::ImageRegionIterator<OutputImageType>     OutputIteratorType;
+
+  InputIteratorType inIt(inputPtr, outputRegionForThread);
+  OutputIteratorType outIt(outputPtr, outputRegionForThread);
+
+  // Walk the region: reduce each input pixel with the model.
+  // NOTE(review): the input mask (GetInputMask) is not honored here —
+  // confirm whether masked processing is intended to be supported.
+  for (inIt.GoToBegin(), outIt.GoToBegin(); !inIt.IsAtEnd() && !outIt.IsAtEnd(); ++inIt, ++outIt)
+    {
+    // Predict (reduce) the current pixel
+    outIt.Set(m_Model->Predict(inIt.Get()));
+    progress.CompletedPixel();
+    }
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>::GenerateOutputInformation()
+{
+  Superclass::GenerateOutputInformation();
+  // A model is mandatory: it defines the output dimension.
+  if (!m_Model)
+    {
+    itkGenericExceptionMacro(<< "No model for dimensionality reduction");
+    }
+  // The output pixel has as many components as the reduced dimension.
+  this->GetOutput()->SetNumberOfComponentsPerPixel( m_Model->GetDimension() );
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::BatchThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId)
+{
+  // Get the input pointers (removed unused mask/confidence locals)
+  InputImageConstPointerType inputPtr  = this->GetInput();
+  OutputImagePointerType     outputPtr = this->GetOutput();
+
+  // Progress reporting
+  itk::ProgressReporter progress(this, threadId, outputRegionForThread.GetNumberOfPixels());
+
+  // Define iterators
+  typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
+  typedef itk::ImageRegionIterator<OutputImageType>     OutputIteratorType;
+
+  InputIteratorType inIt(inputPtr, outputRegionForThread);
+  OutputIteratorType outIt(outputPtr, outputRegionForThread);
+
+  typedef typename ModelType::InputSampleType      InputSampleType;
+  typedef typename ModelType::InputListSampleType  InputListSampleType;
+  typedef typename ModelType::TargetValueType      TargetValueType;
+  typedef typename ModelType::TargetListSampleType TargetListSampleType;
+
+  // Gather every pixel of the region into a list sample for batch prediction.
+  typename InputListSampleType::Pointer samples = InputListSampleType::New();
+  unsigned int num_features = inputPtr->GetNumberOfComponentsPerPixel();
+  samples->SetMeasurementVectorSize(num_features);
+  InputSampleType sample(num_features);
+
+  // Fill the samples
+  for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
+    {
+    typename InputImageType::PixelType pix = inIt.Get();
+    for(size_t feat=0; feat<num_features; ++feat)
+      {
+      sample[feat]=pix[feat];
+      }
+    samples->PushBack(sample);
+    }
+  // Make the batch prediction
+  typename TargetListSampleType::Pointer labels;
+
+  // This call is threadsafe
+  labels = m_Model->PredictBatch(samples);
+
+  // Copy predictions back into the output image, in iteration order.
+  typename TargetListSampleType::ConstIterator labIt = labels->Begin();
+  for (outIt.GoToBegin(); !outIt.IsAtEnd(); ++outIt)
+    {
+    itk::VariableLengthVector<TargetValueType> labelValue;
+    labelValue = labIt.GetMeasurementVector();
+    ++labIt;
+    outIt.Set(labelValue);
+    progress.CompletedPixel();
+    }
+}
+
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::ThreadedGenerateData(const OutputImageRegionType& outputRegionForThread, itk::ThreadIdType threadId)
+{
+  // Dispatch to batch (PredictBatch) or pixel-wise (Predict) processing.
+  if(m_BatchMode)
+    {
+    this->BatchThreadedGenerateData(outputRegionForThread, threadId);
+    }
+  else
+    {
+    this->ClassicThreadedGenerateData(outputRegionForThread, threadId);
+    }
+}
+
+/**
+ * PrintSelf Method
+ */
+template <class TInputImage, class TOutputImage, class TMaskImage>
+void
+ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
+::PrintSelf(std::ostream& os, itk::Indent indent) const
+{
+  // Only delegates to the superclass; member state is not printed here.
+  Superclass::PrintSelf(os, indent);
+}
+
+} // End namespace otb
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd8f7a6992b1b3898608c90b47ead64db1a45c8a
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbPCAModel_h
+#define otbPCAModel_h
+
+#include "otbMachineLearningModelTraits.h"
+#include "otbMachineLearningModel.h"
+
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+#endif
+#include "otb_shark.h"
+#include <shark/Algorithms/Trainers/PCA.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
+namespace otb
+{
+
+/** \class PCAModel
+ *
+ * This class wraps a PCA model implemented by Shark, in a otb::MachineLearningModel
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+template <class TInputValue>
+class ITK_EXPORT PCAModel
+  : public  MachineLearningModel<
+    itk::VariableLengthVector< TInputValue >,
+    itk::VariableLengthVector< TInputValue > >
+{
+public:
+  /** Standard class typedefs */
+  typedef PCAModel Self;
+  typedef MachineLearningModel<
+    itk::VariableLengthVector< TInputValue >,
+    itk::VariableLengthVector< TInputValue> > Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  typedef typename Superclass::InputValueType       InputValueType;
+  typedef typename Superclass::InputSampleType      InputSampleType;
+  typedef typename Superclass::InputListSampleType  InputListSampleType;
+  typedef typename InputListSampleType::Pointer     ListSamplePointerType;
+  typedef typename Superclass::TargetValueType      TargetValueType;
+  typedef typename Superclass::TargetSampleType     TargetSampleType;
+  typedef typename Superclass::TargetListSampleType TargetListSampleType;
+
+  // Confidence map related typedefs
+  typedef typename Superclass::ConfidenceValueType       ConfidenceValueType;
+  typedef typename Superclass::ConfidenceSampleType      ConfidenceSampleType;
+  typedef typename Superclass::ConfidenceListSampleType  ConfidenceListSampleType;
+
+  itkNewMacro(Self);
+  itkTypeMacro(PCAModel, DimensionalityReductionModel);
+
+  itkSetMacro(DoResizeFlag,bool);
+
+  /** If true, Save() also writes the eigenvectors, eigenvalues and the
+   *  reconstruction error to a <filename>.txt side-car file */
+  itkSetMacro(WriteEigenvectors, bool);
+  itkGetMacro(WriteEigenvectors, bool);
+
+  bool CanReadFile(const std::string & filename);
+  bool CanWriteFile(const std::string & filename);
+
+  void Save(const std::string & filename, const std::string & name="")  ITK_OVERRIDE;
+  void Load(const std::string & filename, const std::string & name="")  ITK_OVERRIDE;
+
+  /** Train the Shark PCA on the input list sample */
+  void Train() ITK_OVERRIDE;
+
+protected:
+  PCAModel();
+  ~PCAModel() ITK_OVERRIDE;
+
+  /** Project one sample on the first m_Dimension principal components.
+   *  ITK_OVERRIDE added for consistency with DoPredictBatch below. */
+  virtual TargetSampleType DoPredict(
+    const InputSampleType& input,
+    ConfidenceValueType * quality = ITK_NULLPTR) const ITK_OVERRIDE;
+
+  /** Project samples [startIndex, startIndex+size) in one Shark call */
+  virtual void DoPredictBatch(
+    const InputListSampleType *,
+    const unsigned int & startIndex,
+    const unsigned int & size,
+    TargetListSampleType *,
+    ConfidenceListSampleType * quality = ITK_NULLPTR) const ITK_OVERRIDE;
+
+private:
+  shark::LinearModel<> m_Encoder;  // projection onto the principal components
+  shark::LinearModel<> m_Decoder;  // back-projection, used for the reconstruction error
+  shark::PCA m_PCA;                // Shark trainer, also holds the eigen decomposition
+  bool m_DoResizeFlag;
+  bool m_WriteEigenvectors;
+};
+} // end namespace otb
+
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbPCAModel.txx"
+#endif
+
+
+#endif
+
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
new file mode 100644
index 0000000000000000000000000000000000000000..9f39326a21bc5f1980a49d80ecdaea55b42a450a
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbPCAModel_txx
+#define otbPCAModel_txx
+
+#include "otbPCAModel.h"
+
+#include <fstream>
+#include "itkMacro.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otbSharkUtils.h"
+//include train function
+#include <shark/ObjectiveFunctions/ErrorFunction.h>
+#include <shark/Algorithms/GradientDescent/Rprop.h>// the RProp optimization algorithm
+#include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
+#include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
+#include <shark/ObjectiveFunctions/ErrorFunction.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+
+namespace otb
+{
+
+// Constructor: batch prediction (DoPredictBatch) is declared thread-safe for
+// this model; the output dimension is 0 until set by the user or by Load().
+template <class TInputValue>
+PCAModel<TInputValue>::PCAModel()
+{
+  this->m_IsDoPredictBatchMultiThreaded = true;
+  this->m_Dimension = 0;
+}
+
+template <class TInputValue>
+PCAModel<TInputValue>::~PCAModel()
+{
+}
+
+// Train the Shark PCA on the input list sample and derive the encoder
+// (projection) and decoder (back-projection) for the first m_Dimension
+// principal components.
+template <class TInputValue>
+void
+PCAModel<TInputValue>::Train()
+{
+  std::vector<shark::RealVector> features;
+
+  Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+
+  shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
+  m_PCA.setData(inputSamples);
+  m_PCA.encoder(m_Encoder, this->m_Dimension);
+  m_PCA.decoder(m_Decoder, this->m_Dimension);
+}
+
+// A file is considered readable if a full Load() succeeds.
+// NOTE(review): this deserializes the whole model just to probe the file,
+// which can be costly for large models.
+template <class TInputValue>
+bool
+PCAModel<TInputValue>::CanReadFile(const std::string & filename)
+{
+  try
+  {
+    this->Load(filename);
+    m_Encoder.name();
+  }
+  catch(...)
+  {
+  return false;
+  }
+  return true;
+}
+
+template <class TInputValue>
+bool PCAModel<TInputValue>::CanWriteFile(const std::string & /*filename*/)
+{
+  return true;
+}
+
+// Serialize the encoder behind a "pca" signature line; optionally dump the
+// eigen data and the reconstruction error to a <filename>.txt side-car file.
+template <class TInputValue>
+void
+PCAModel<TInputValue>::Save(const std::string & filename, const std::string & /*name*/)
+{
+  std::ofstream ofs(filename);
+  ofs << "pca" << std::endl; //first line: model signature checked by Load()
+  shark::TextOutArchive oa(ofs);
+  m_Encoder.write(oa);
+  ofs.close();
+
+  if (this->m_WriteEigenvectors == true)     // output the map vectors in a txt file
+    {
+    std::ofstream otxt(filename+".txt");
+
+    otxt << "Eigenvectors : " << m_PCA.eigenvectors() << std::endl;
+    otxt << "Eigenvalues : " << m_PCA.eigenvalues() << std::endl;
+
+    std::vector<shark::RealVector> features;
+
+    shark::SquaredLoss<shark::RealVector> loss;
+    Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+    shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
+    otxt << "Reconstruction error : " <<
+      loss.eval(inputSamples,m_Decoder(m_Encoder(inputSamples))) << std::endl;
+    otxt.close();
+    }
+}
+
+// Deserialize the encoder. If m_Dimension is still 0 it is taken from the
+// stored encoder's output size; otherwise the eigenvector matrix is resized
+// to keep only the first m_Dimension components.
+// NOTE(review): m_DoResizeFlag has a setter but is never consulted here --
+// confirm whether the resize below was meant to be conditional on it.
+// NOTE(review): verify that the matrix resize() preserves the retained
+// coefficients; if it does not, the truncated encoder is invalid.
+template <class TInputValue>
+void
+PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*name*/)
+{
+  std::ifstream ifs(filename);
+  char encoder[256];
+  ifs.getline(encoder,256); 
+  std::string encoderstr(encoder);
+
+  if (encoderstr != "pca"){
+    itkExceptionMacro(<< "Error opening " << filename.c_str() );
+    }
+  shark::TextInArchive ia(ifs);
+  m_Encoder.read(ia);
+  ifs.close();
+  if (this->m_Dimension ==0)
+  {
+    this->m_Dimension = m_Encoder.outputSize();
+  }
+
+  auto eigenvectors = m_Encoder.matrix();
+  eigenvectors.resize(this->m_Dimension,m_Encoder.inputSize());
+
+  m_Encoder.setStructure(eigenvectors, m_Encoder.offset() );
+}
+
+// Project a single sample on the first m_Dimension principal components.
+template <class TInputValue>
+typename PCAModel<TInputValue>::TargetSampleType
+PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
+{  
+  shark::RealVector samples(value.Size());
+  for(size_t i = 0; i < value.Size();i++)
+    {
+    samples[i]=value[i];
+    }
+    
+    std::vector<shark::RealVector> features;
+    features.push_back(samples);
+   
+    shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
+     
+  data = m_Encoder(data);
+  TargetSampleType target;
+  target.SetSize(this->m_Dimension);
+  
+  for(unsigned int a = 0; a < this->m_Dimension; ++a){
+    target[a]=data.element(0)[a];
+  }
+  return target;
+}
+
+// Project samples [startIndex, startIndex+size) in a single Shark call and
+// write the results back at the same indices (thread-safe, see constructor).
+template <class TInputValue>
+void PCAModel<TInputValue>
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
+{
+  std::vector<shark::RealVector> features;
+  Shark::ListSampleRangeToSharkVector(input, features,startIndex,size);
+  shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
+  TargetSampleType target;
+  data = m_Encoder(data);
+  unsigned int id = startIndex;
+  target.SetSize(this->m_Dimension);
+  for(const auto& p : data.elements())
+    {
+    for(unsigned int a = 0; a < this->m_Dimension; ++a)
+      {
+      target[a]=p[a];
+      }
+    targets->SetMeasurementVector(id,target);
+    ++id;
+    }
+}
+
+} // namespace otb
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.h
new file mode 100644
index 0000000000000000000000000000000000000000..d30a2c4663f0e9b44afc66045ffd1bd24b89dffc
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbPCAModelFactory_h
+#define otbPCAModelFactory_h
+
+#include "itkObjectFactoryBase.h"
+#include "itkImageIOBase.h"
+
+namespace otb
+{
+
+/** \class PCAModelFactory
+ * 
+ * Factory for the PCAModel
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+// NOTE(review): TTargetValue is not used by this factory (the created model
+// only depends on TInputValue) -- confirm whether the parameter is required.
+template <class TInputValue, class TTargetValue>
+class ITK_EXPORT PCAModelFactory : public itk::ObjectFactoryBase
+{
+public:
+  /** Standard class typedefs. */
+  typedef PCAModelFactory   Self;
+  typedef itk::ObjectFactoryBase        Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Class methods used to interface with the registered factories. */
+  const char* GetITKSourceVersion(void) const ITK_OVERRIDE;
+  const char* GetDescription(void) const ITK_OVERRIDE;
+
+  /** Method for class instantiation. */
+  itkFactorylessNewMacro(Self);
+
+  /** Run-time type information (and related methods). */
+  itkTypeMacro(PCAModelFactory, itk::ObjectFactoryBase);
+
+  /** Register one factory of this type with the global ITK object-factory
+   *  registry so PCA models can be created through the generic
+   *  "DimensionalityReductionModel" interface. */
+  static void RegisterOneFactory(void)
+  {
+    Pointer PCAFactory = PCAModelFactory::New();
+    itk::ObjectFactoryBase::RegisterFactory(PCAFactory);
+  }
+
+protected:
+  PCAModelFactory();
+  ~PCAModelFactory() ITK_OVERRIDE;
+
+private:
+  PCAModelFactory(const Self &); //purposely not implemented
+  void operator =(const Self&); //purposely not implemented
+};
+
+} //namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbPCAModelFactory.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.txx
new file mode 100644
index 0000000000000000000000000000000000000000..ab31accbe01b926a1d4ca57b4f23b3fc813cd931
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModelFactory.txx
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbPCAFactory_txx
+#define otbPCAFactory_txx
+
+#include "otbPCAModelFactory.h"
+
+#include "itkCreateObjectFunction.h"
+#include "otbPCAModel.h"
+#include "itkVersion.h"
+
+namespace otb
+{
+
+// Register the PCAModel creator under the generic
+// "DimensionalityReductionModel" class name so instances can be produced
+// through the ITK object-factory mechanism.
+template <class TInputValue, class TOutputValue>
+PCAModelFactory<TInputValue,TOutputValue>::PCAModelFactory()
+{
+  std::string classOverride = std::string("DimensionalityReductionModel");
+  std::string subclass = std::string("PCAModel");
+
+  this->RegisterOverride(
+    classOverride.c_str(),
+    subclass.c_str(),
+    "Shark PCA ML Model",
+    1,
+    itk::CreateObjectFunction<PCAModel<TInputValue>>::New());
+}
+
+template <class TInputValue, class TOutputValue>
+PCAModelFactory<TInputValue,TOutputValue>::~PCAModelFactory()
+{
+}
+
+// Version string used by ITK to check factory/library compatibility.
+template <class TInputValue, class TOutputValue>
+const char* PCAModelFactory<TInputValue,TOutputValue>::GetITKSourceVersion(void) const
+{
+  return ITK_SOURCE_VERSION;
+}
+
+template <class TInputValue, class TOutputValue>
+const char* PCAModelFactory<TInputValue,TOutputValue>::GetDescription() const
+{
+  return "PCA model factory";
+}
+
+} // end namespace otb
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f6fb2b08e4a4f0a2e443e90549e1a6461b31d10
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbSOMModel_h
+#define otbSOMModel_h
+
+#include "otbSOMMap.h"
+
+#include "itkEuclideanDistanceMetric.h" // the distance function
+
+#include "otbCzihoSOMLearningBehaviorFunctor.h"
+#include "otbCzihoSOMNeighborhoodBehaviorFunctor.h"
+
+#include "otbMachineLearningModelTraits.h"
+#include "otbMachineLearningModel.h"
+
+namespace otb
+{
+
+/** \class SOMModel
+ * MachineLearningModel for Self-Organizing Map
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+template <class TInputValue, unsigned int MapDimension>
+class ITK_EXPORT SOMModel
+  : public  MachineLearningModel<
+    itk::VariableLengthVector< TInputValue >,
+    itk::VariableLengthVector< TInputValue > >
+{
+public:
+  /** Standard class typedefs */
+  typedef SOMModel Self;
+  typedef MachineLearningModel<
+    itk::VariableLengthVector< TInputValue >,
+    itk::VariableLengthVector< TInputValue > > Superclass;
+  typedef itk::SmartPointer<Self> Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  typedef typename Superclass::InputValueType       InputValueType;
+  typedef typename Superclass::InputSampleType      InputSampleType;
+  typedef typename Superclass::InputListSampleType  InputListSampleType;
+  typedef typename InputListSampleType::Pointer     ListSamplePointerType;
+  typedef typename Superclass::TargetValueType      TargetValueType;
+  typedef typename Superclass::TargetSampleType     TargetSampleType;
+  typedef typename Superclass::TargetListSampleType TargetListSampleType;
+
+  // Confidence map related typedefs
+  typedef typename Superclass::ConfidenceValueType       ConfidenceValueType;
+  typedef typename Superclass::ConfidenceSampleType      ConfidenceSampleType;
+  typedef typename Superclass::ConfidenceListSampleType  ConfidenceListSampleType;
+
+  typedef SOMMap<
+    itk::VariableLengthVector<TInputValue>,
+    itk::Statistics::EuclideanDistanceMetric<
+      itk::VariableLengthVector<TInputValue> >,
+      MapDimension>                                 MapType;
+  typedef typename MapType::SizeType                SizeType;
+  typedef typename MapType::SpacingType             SpacingType;
+
+  typedef Functor::CzihoSOMLearningBehaviorFunctor      SOMLearningBehaviorFunctorType;
+  typedef Functor::CzihoSOMNeighborhoodBehaviorFunctor  SOMNeighborhoodBehaviorFunctorType;
+
+  itkNewMacro(Self);
+  itkTypeMacro(SOMModel, DimensionalityReductionModel);
+
+  /** Accessors */
+  itkSetMacro(NumberOfIterations, unsigned int);
+  itkGetMacro(NumberOfIterations, unsigned int);
+  itkSetMacro(BetaInit, double);
+  itkGetMacro(BetaInit, double);
+  itkSetMacro(WriteMap, bool);
+  itkGetMacro(WriteMap, bool);
+  itkSetMacro(BetaEnd, double);
+  itkGetMacro(BetaEnd, double);
+  itkSetMacro(MinWeight, InputValueType);
+  itkGetMacro(MinWeight, InputValueType);
+  itkSetMacro(MaxWeight, InputValueType);
+  itkGetMacro(MaxWeight, InputValueType);
+  itkSetMacro(MapSize, SizeType);
+  itkGetMacro(MapSize, SizeType);
+  itkSetMacro(NeighborhoodSizeInit, SizeType);
+  itkGetMacro(NeighborhoodSizeInit, SizeType);
+  itkSetMacro(RandomInit, bool);
+  itkGetMacro(RandomInit, bool);
+  itkSetMacro(Seed, unsigned int);
+  itkGetMacro(Seed, unsigned int);
+
+  bool CanReadFile(const std::string & filename);
+  bool CanWriteFile(const std::string & filename);
+
+  /** Binary (de)serialization of the SOM map. ITK_OVERRIDE added for
+   *  consistency with the other MachineLearningModel subclasses. */
+  void Save(const std::string & filename, const std::string & name="") ITK_OVERRIDE;
+  void Load(const std::string & filename, const std::string & name="") ITK_OVERRIDE;
+
+  void Train() ITK_OVERRIDE;
+
+protected:
+  SOMModel();
+  ~SOMModel() ITK_OVERRIDE;
+
+private:
+  typename MapType::Pointer m_SOMMap;
+
+  /** Return the map index of the winner neuron for the input sample */
+  virtual TargetSampleType DoPredict(
+    const InputSampleType& input,
+    ConfidenceValueType * quality = ITK_NULLPTR) const ITK_OVERRIDE;
+
+  /** Map size (width, height) */
+  SizeType m_MapSize;
+  /** Number of iterations */
+  unsigned int m_NumberOfIterations;
+  /** Initial learning coefficient */
+  double m_BetaInit;
+  /** Final learning coefficient */
+  double m_BetaEnd;
+  /** Initial neighborhood size */
+  SizeType m_NeighborhoodSizeInit;
+  /** Minimum initial neuron weights */
+  InputValueType m_MinWeight;
+  /** Maximum initial neuron weights */
+  InputValueType m_MaxWeight;
+  /** Random initialization bool */
+  bool m_RandomInit;
+  /** Seed for random initialization */
+  unsigned int m_Seed;
+  /** Behavior of the Learning weightening (link to the beta coefficient) */
+  SOMLearningBehaviorFunctorType m_BetaFunctor;
+  /** Behavior of the Neighborhood extent */
+  SOMNeighborhoodBehaviorFunctorType m_NeighborhoodSizeFunctor;
+  /** Write the SOM Map vectors in a txt file */
+  bool m_WriteMap;
+};
+
+} // end namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbSOMModel.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
new file mode 100644
index 0000000000000000000000000000000000000000..e1d7dc3fc6251abbdbd3aa210cd3c3ebced903d8
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModel.txx
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbSOMModel_txx
+#define otbSOMModel_txx
+
+#include "otbSOMModel.h"
+#include "otbSOM.h"
+
+#include "itkMacro.h"
+#include "itkImageRegionIterator.h"
+#include "itkImageRegionConstIterator.h"
+#include "itkImage.h"
+
+#include <fstream>
+
+namespace otb
+{
+
+namespace internal
+{
+/** Write one value as raw bytes */
+template<typename T>
+std::ostream& BinaryWrite(std::ostream& stream, const T& value)
+{
+  return stream.write(reinterpret_cast<const char*>(&value), sizeof(T));
+}
+
+/** Write a string without its terminator.
+ *  'inline' is required: this is a non-template function defined in a .txx
+ *  header included by several translation units; without 'inline' it
+ *  violates the One Definition Rule (multiple-definition link errors). */
+inline std::ostream& BinaryWriteString(std::ofstream& stream, const std::string& value)
+{
+  return stream.write(value.c_str(), value.length());
+}
+
+/** Read one value from its raw-byte representation */
+template<typename T>
+std::istream & BinaryRead(std::istream& stream, T& value)
+{
+  return stream.read(reinterpret_cast<char*>(&value), sizeof(T));
+}
+} // end of namespace internal
+
+template <class TInputValue, unsigned int MapDimension>
+SOMModel<TInputValue,  MapDimension>::SOMModel()
+{
+  // The model output dimension is the dimension of the SOM map itself
+  this->m_Dimension = MapType::ImageDimension;
+}
+
+template <class TInputValue, unsigned int MapDimension>
+SOMModel<TInputValue, MapDimension>::~SOMModel()
+{
+}
+
+/** Train the SOM on the input list sample with the configured map size,
+ *  neighborhood, iteration count and learning coefficients. */
+template <class TInputValue, unsigned int MapDimension>
+void
+SOMModel<TInputValue,  MapDimension>::Train()
+{
+  typedef otb::SOM<InputListSampleType, MapType>    EstimatorType;
+  typename EstimatorType::Pointer estimator = EstimatorType::New();
+  estimator->SetListSample(this->GetInputListSample());
+  estimator->SetMapSize(m_MapSize);
+  estimator->SetNeighborhoodSizeInit(m_NeighborhoodSizeInit);
+  estimator->SetNumberOfIterations(m_NumberOfIterations);
+  estimator->SetBetaInit(m_BetaInit);
+  estimator->SetBetaEnd(m_BetaEnd);
+  estimator->SetMaxWeight(m_MaxWeight);
+  // NOTE(review): m_MinWeight, m_RandomInit, m_Seed and the two behavior
+  // functors have setters on this class but are never forwarded to the
+  // estimator -- confirm whether the corresponding calls are missing here.
+  estimator->Update();
+  m_SOMMap = estimator->GetOutput();
+}
+
+/** A file is considered readable if a full Load() succeeds. */
+template <class TInputValue, unsigned int MapDimension>
+bool
+SOMModel<TInputValue, MapDimension>::CanReadFile(const std::string & filename)
+{
+  try
+    {
+    this->Load(filename);
+    }
+  catch(...)
+    {
+    return false;
+    }
+  return true;
+}
+
+template <class TInputValue, unsigned int MapDimension>
+bool
+SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & /*filename*/)
+{
+  return true;
+}
+
+/** Binary format written here (read back by Load()): "som" signature, map
+ *  dimension, map size per dimension, number of components per neuron, then
+ *  all neuron weight vectors.
+ *  NOTE(review): weights are written with sizeof(InputValueType) bytes each
+ *  while Load() reads them back as 'float'; this only round-trips when the
+ *  writing model was instantiated with TInputValue = float -- confirm. */
+template <class TInputValue, unsigned int MapDimension>
+void
+SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & /*name*/)
+{
+  itk::ImageRegionConstIterator<MapType> inputIterator(m_SOMMap,m_SOMMap->GetLargestPossibleRegion());
+  inputIterator.GoToBegin();
+  std::ofstream ofs(filename, std::ios::binary);
+  internal::BinaryWriteString(ofs,"som");
+  internal::BinaryWrite(ofs,static_cast<unsigned int>(MapDimension));
+  SizeType size = m_SOMMap->GetLargestPossibleRegion().GetSize() ;
+  for (size_t i=0;i<MapDimension;i++)
+    {
+    internal::BinaryWrite(ofs,size[i]);
+    }
+
+  internal::BinaryWrite(ofs,inputIterator.Get().GetNumberOfElements());
+  while(!inputIterator.IsAtEnd())
+    {
+    InputSampleType vect = inputIterator.Get();
+    for (size_t i=0;i<vect.GetNumberOfElements();i++)
+      {
+      internal::BinaryWrite(ofs,vect[i]);
+      }
+    ++inputIterator;
+    }
+  ofs.close();
+
+  // output the map vectors in a txt file
+  if (this->m_WriteMap == true)
+    {
+    std::ofstream otxt(filename+".txt");
+    inputIterator.GoToBegin();
+    while(!inputIterator.IsAtEnd())
+      {
+      InputSampleType vect = inputIterator.Get();
+      for (size_t i=0;i<vect.GetNumberOfElements();i++)
+        {
+        otxt << vect[i] << " ";
+        }
+      otxt << std::endl;
+      ++inputIterator;
+      }
+    otxt.close();
+    }
+}
+
+/** Rebuild the SOM map from the binary format produced by Save(). */
+template <class TInputValue, unsigned int MapDimension>
+void
+SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & /*name*/)
+{
+  std::ifstream ifs(filename, std::ios::binary);
+
+  /**  Read the model key (should be som) */
+  char s[]="   ";
+  for (int i=0; i<3; i++)
+    {
+    internal::BinaryRead(ifs,s[i]);
+    }
+  std::string modelType(s);
+  /** Read the dimension of the map (should be equal to MapDimension) */
+
+  unsigned int dimension;
+  internal::BinaryRead(ifs,dimension);
+  if (modelType != "som" || dimension != MapDimension)
+    {
+    itkExceptionMacro(<< "Error opening " << filename.c_str() );
+    }
+
+  SizeType size;
+  itk::Index< MapDimension > index;
+  for (unsigned int i=0 ; i<MapDimension; i++)
+    {
+    internal::BinaryRead(ifs,size[i]);
+    index[i]=0;
+    }
+  unsigned int numberOfElements;
+  internal::BinaryRead(ifs,numberOfElements);
+  m_SOMMap = MapType::New();
+  typename MapType::RegionType region;
+  region.SetSize( size );
+  m_SOMMap->SetNumberOfComponentsPerPixel(numberOfElements);
+  region.SetIndex( index );
+  m_SOMMap->SetRegions( region );
+  m_SOMMap->Allocate();
+
+  itk::ImageRegionIterator<MapType> outputIterator(m_SOMMap,region);
+  outputIterator.GoToBegin();
+  std::string value;
+  while(!outputIterator.IsAtEnd())
+    {
+    InputSampleType  vect(numberOfElements);
+    for (unsigned int i=0 ; i<numberOfElements; i++)
+      {
+      // The InputValue type may differ between the training application and
+      // the dimensionality-reduction application; values are always read
+      // back as 'float' here (see the matching NOTE above Save()).
+      float v;
+      internal::BinaryRead(ifs,v);
+      vect[i] = static_cast<double>(v);
+      }
+    outputIterator.Set(vect);
+    ++outputIterator;
+    }
+  ifs.close();
+  this->m_Dimension = MapType::ImageDimension;
+}
+
+/** Predict: return the map index of the winner neuron for the sample. */
+template <class TInputValue, unsigned int MapDimension>
+typename SOMModel<TInputValue, MapDimension>::TargetSampleType
+SOMModel<TInputValue, MapDimension>::DoPredict(
+  const InputSampleType & value,
+  ConfidenceValueType * /*quality*/) const
+{
+  TargetSampleType target;
+  target.SetSize(this->m_Dimension);
+
+  auto winner =m_SOMMap->GetWinner(value);
+  for (unsigned int i=0; i< this->m_Dimension ;i++)
+    {
+    target[i] = winner.GetElement(i);
+    }
+  return target;
+}
+
+} // namespace otb
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.h b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.h
new file mode 100644
index 0000000000000000000000000000000000000000..71d314e5f5cad1bcf31ed7de8505d6dd164309bc
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbSOMModelFactory_h
+#define otbSOMModelFactory_h
+
+#include "itkObjectFactoryBase.h"
+#include "itkImageIOBase.h"
+
+namespace otb
+{
+
+/** \class SOMModelFactory
+ * 
+ * Factory for SOMModel
+ *
+ * \ingroup OTBDimensionalityReductionLearning
+ */
+// NOTE(review): TTargetValue is not used by this factory -- confirm whether
+// the parameter is required.
+template <class TInputValue, class TTargetValue, unsigned int MapDimension>
+class ITK_EXPORT SOMModelFactory : public itk::ObjectFactoryBase
+{
+public:
+  /** Standard class typedefs. */
+  typedef SOMModelFactory   Self;
+  typedef itk::ObjectFactoryBase        Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
+
+  /** Class methods used to interface with the registered factories. */
+  const char* GetITKSourceVersion(void) const ITK_OVERRIDE;
+  const char* GetDescription(void) const ITK_OVERRIDE;
+
+  /** Method for class instantiation. */
+  itkFactorylessNewMacro(Self);
+
+  /** Run-time type information (and related methods). */
+  itkTypeMacro(SOMModelFactory, itk::ObjectFactoryBase);
+
+  /** Register one factory of this type with the global ITK object-factory
+   *  registry. MapDimension fixes the dimension of the SOM map used by the
+   *  created models. */
+  static void RegisterOneFactory(void)
+  {
+    Pointer SOMFactory = SOMModelFactory::New();
+    itk::ObjectFactoryBase::RegisterFactory(SOMFactory);
+  }
+
+protected:
+  SOMModelFactory();
+  ~SOMModelFactory() ITK_OVERRIDE;
+
+private:
+  SOMModelFactory(const Self &); //purposely not implemented
+  void operator =(const Self&); //purposely not implemented
+
+};
+
+} //namespace otb
+
+#ifndef OTB_MANUAL_INSTANTIATION
+#include "otbSOMModelFactory.txx"
+#endif
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.txx
new file mode 100644
index 0000000000000000000000000000000000000000..5799660768e900fa37e2dc1f4365a5f37b66079d
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbSOMModelFactory.txx
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef otbSOMModelFactory_txx
+#define otbSOMModelFactory_txx
+
+#include "otbSOMModelFactory.h"
+
+#include "itkCreateObjectFunction.h"
+#include "otbSOMModel.h"
+#include "itkVersion.h"
+
+namespace otb
+{
+template <class TInputValue, class TOutputValue, unsigned int MapDimension>
+SOMModelFactory<TInputValue,TOutputValue,MapDimension>::SOMModelFactory()
+{
+  std::string classOverride = std::string("DimensionalityReductionModel");
+  std::string subclass = std::string("SOMModel");
+
+  this->RegisterOverride(
+    classOverride.c_str(),
+    subclass.c_str(),
+    "SOM DR Model",
+    1,
+    itk::CreateObjectFunction<SOMModel<TInputValue,  MapDimension>>::New());
+}
+
+template <class TInputValue, class TOutputValue, unsigned int MapDimension>
+SOMModelFactory<TInputValue,TOutputValue,MapDimension>::~SOMModelFactory()
+{
+}
+
+template <class TInputValue, class TOutputValue, unsigned int MapDimension>
+const char* SOMModelFactory<TInputValue,TOutputValue,MapDimension>::GetITKSourceVersion(void) const
+{
+  return ITK_SOURCE_VERSION;
+}
+
+template <class TInputValue, class TOutputValue, unsigned int MapDimension>
+const char* SOMModelFactory<TInputValue,TOutputValue,MapDimension>::GetDescription() const
+{
+  return "SOM model factory";
+}
+
+} // end namespace otb
+
+#endif
diff --git a/Modules/Learning/DimensionalityReductionLearning/otb-module.cmake b/Modules/Learning/DimensionalityReductionLearning/otb-module.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..aaff635d359f095f0e8cd79c32ebac618498e6bc
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/otb-module.cmake
@@ -0,0 +1,36 @@
+#
+# Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+#
+# This file is part of Orfeo Toolbox
+#
+#     https://www.orfeo-toolbox.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set(DOCUMENTATION "Dimensionality reduction application")
+otb_module(OTBDimensionalityReductionLearning
+  DEPENDS
+    OTBCommon
+    OTBITK
+    OTBShark
+    OTBBoost
+    OTBSOM
+    OTBLearningBase
+
+  TEST_DEPENDS
+    OTBTestKernel
+
+  DESCRIPTION
+    "${DOCUMENTATION}"
+)
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/CMakeLists.txt b/Modules/Learning/DimensionalityReductionLearning/test/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e51906297744b458c625bcc8b0d78f65a67a529a
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/CMakeLists.txt
@@ -0,0 +1,102 @@
+#
+# Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+#
+# This file is part of Orfeo Toolbox
+#
+#     https://www.orfeo-toolbox.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+otb_module_test()
+
+set(OTBDimensionalityReductionLearningTests
+otbDimensionalityReductionLearningTestDriver.cxx
+otbAutoencoderModelTest.cxx
+otbPCAModelTest.cxx
+otbSOMModelTest.cxx
+)
+
+add_executable(otbDimensionalityReductionLearningTestDriver ${OTBDimensionalityReductionLearningTests})
+target_link_libraries(otbDimensionalityReductionLearningTestDriver ${OTBDimensionalityReductionLearning-Test_LIBRARIES})
+otb_module_target_label(otbDimensionalityReductionLearningTestDriver)
+
+# Tests Declaration
+# --------------- Autoencoder --------------------------------
+otb_add_test(NAME leTuAutoencoderModelNew COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbAutoencoderModelNew
+  )
+
+otb_add_test(NAME leTvAutoencoderModelTrain COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbAutoencoderModeTrain
+  ${INPUTDATA}/letter_light.scale
+  ${TEMP}/model.ae
+  )
+
+otb_add_test(NAME leTvAutoencoderModelCanRead COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbAutoencoderModelCanRead
+  ${TEMP}/model.ae
+  )
+
+set_property(TEST leTvAutoencoderModelCanRead APPEND PROPERTY DEPENDS leTvAutoencoderModelTrain)
+
+# --------------- PCA --------------------------------
+otb_add_test(NAME leTuPCAModelNew COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbPCAModelNew
+  )
+
+otb_add_test(NAME leTvPCAModelTrain COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbPCAModeTrain
+  ${INPUTDATA}/letter_light.scale
+  ${TEMP}/model.pca
+  )
+
+otb_add_test(NAME leTvPCAModelCanRead COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbPCAModelCanRead
+  ${TEMP}/model.pca
+  )
+
+set_property(TEST leTvPCAModelCanRead APPEND PROPERTY DEPENDS leTvPCAModelTrain)
+
+# --------------- SOM --------------------------------
+otb_add_test(NAME leTuSOMModelNew COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbSOMModelNew
+  )
+
+otb_add_test(NAME leTvSOMModelTrain COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbSOMModeTrain
+  ${INPUTDATA}/letter_light.scale
+  ${TEMP}/model2D.som
+  ${TEMP}/model3D.som
+  ${TEMP}/model4D.som
+  ${TEMP}/model5D.som
+  )
+
+otb_add_test(NAME leTvSOMModelCanRead COMMAND
+  otbDimensionalityReductionLearningTestDriver
+  otbSOMModelCanRead
+  ${TEMP}/model2D.som
+  ${TEMP}/model3D.som
+  ${TEMP}/model4D.som
+  ${TEMP}/model5D.som
+  )
+
+set_property(TEST leTvSOMModelCanRead APPEND PROPERTY DEPENDS leTvSOMModelTrain)
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/otbAutoencoderModelTest.cxx b/Modules/Learning/DimensionalityReductionLearning/test/otbAutoencoderModelTest.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..ad3880f6f54b9e4fb234d4d3a9297a2918c02d27
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/otbAutoencoderModelTest.cxx
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "otbAutoencoderModel.h"
+#include "otbReadDataFile.h"
+#include "itkMacro.h"
+
+typedef otb::AutoencoderModel<double, shark::LogisticNeuron> LogAutoencoderModel;
+typedef LogAutoencoderModel::InputListSampleType InputListSampleType;
+typedef LogAutoencoderModel::TargetListSampleType TargetListSampleType;
+
+int otbAutoencoderModelNew(int itkNotUsed(argc), char * itkNotUsed(argv) [])
+{
+  LogAutoencoderModel::Pointer model = LogAutoencoderModel::New();
+
+  return EXIT_SUCCESS;
+}
+
+int otbAutoencoderModelCanRead(int argc, char * argv [])
+{
+  if (argc < 2)
+    {
+    std::cerr << "Usage: " << argv[0] << " <model>" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  LogAutoencoderModel::Pointer model = LogAutoencoderModel::New();
+  std::string filename(argv[1]);
+  if (! model->CanReadFile(filename) )
+    {
+    std::cerr << "Failed to read model file : "<< filename << std::endl;
+    return EXIT_FAILURE;
+    }
+  return EXIT_SUCCESS;
+}
+
+int otbAutoencoderModeTrain(int argc, char * argv [])
+{
+  if (argc < 3)
+    {
+    std::cerr << "Usage: " << argv[0] << " letter.scale  model.out" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  // Extract data from letter.scale
+  InputListSampleType::Pointer samples = InputListSampleType::New();
+  TargetListSampleType::Pointer target = TargetListSampleType::New();
+  if (!otb::ReadDataFile(argv[1], samples, target))
+    {
+    std::cout << "Failed to read samples file " << argv[1] << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  itk::Array<unsigned int> nb_neuron;
+  itk::Array<float> noise;
+  itk::Array<float> regularization;
+  itk::Array<float> rho;
+  itk::Array<float> beta;
+
+  nb_neuron.SetSize(1);
+  noise.SetSize(1);
+  regularization.SetSize(1);
+  rho.SetSize(1);
+  beta.SetSize(1);
+
+  nb_neuron[0] = 14;
+  noise[0] = 0.0;
+  regularization[0] = 0.01;
+  rho[0] = 0.0;
+  beta[0] = 0.0;
+
+  LogAutoencoderModel::Pointer model = LogAutoencoderModel::New();
+  model->SetNumberOfHiddenNeurons(nb_neuron);
+  model->SetNumberOfIterations(50);
+  model->SetNumberOfIterationsFineTuning(0);
+  model->SetEpsilon(0.0);
+  model->SetInitFactor(1.0);
+  model->SetRegularization(regularization);
+  model->SetNoise(noise);
+  model->SetRho(rho);
+  model->SetBeta(beta);
+  model->SetWriteWeights(true);
+  model->SetInputListSample(samples);
+  model->Train();
+  model->Save(std::string(argv[2]));
+
+  return EXIT_SUCCESS;
+}
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/otbDimensionalityReductionLearningTestDriver.cxx b/Modules/Learning/DimensionalityReductionLearning/test/otbDimensionalityReductionLearningTestDriver.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..6e0c644798e2501bbc77a3c7dc3a28c55b87f5d8
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/otbDimensionalityReductionLearningTestDriver.cxx
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "otbTestMain.h"
+
+void RegisterTests()
+{
+  REGISTER_TEST(otbAutoencoderModelNew);
+  REGISTER_TEST(otbAutoencoderModelCanRead);
+  REGISTER_TEST(otbAutoencoderModeTrain);
+  REGISTER_TEST(otbPCAModelNew);
+  REGISTER_TEST(otbPCAModelCanRead);
+  REGISTER_TEST(otbPCAModeTrain);
+  REGISTER_TEST(otbSOMModelNew);
+  REGISTER_TEST(otbSOMModelCanRead);
+  REGISTER_TEST(otbSOMModeTrain);
+}
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/otbImageDimensionalityReductionFilterTest.cxx b/Modules/Learning/DimensionalityReductionLearning/test/otbImageDimensionalityReductionFilterTest.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..a2b8da9d9a15553f2681fa732d90711d1b24306f
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/otbImageDimensionalityReductionFilterTest.cxx
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/otbPCAModelTest.cxx b/Modules/Learning/DimensionalityReductionLearning/test/otbPCAModelTest.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..6fe3945e43729e464de4f264025dd78fb8610e16
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/otbPCAModelTest.cxx
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "otbPCAModel.h"
+#include "otbReadDataFile.h"
+
+typedef otb::PCAModel<double> PCAModelType;
+typedef PCAModelType::InputListSampleType InputListSampleType;
+typedef PCAModelType::TargetListSampleType TargetListSampleType;
+
+int otbPCAModelNew(int itkNotUsed(argc), char * itkNotUsed(argv) [])
+{
+  PCAModelType::Pointer model = PCAModelType::New();
+
+  return EXIT_SUCCESS;
+}
+
+int otbPCAModelCanRead(int argc, char * argv [])
+{
+  if (argc < 2)
+    {
+    std::cerr << "Usage: " << argv[0] << " <model>" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  PCAModelType::Pointer model = PCAModelType::New();
+  std::string filename(argv[1]);
+  if (! model->CanReadFile(filename) )
+    {
+    std::cerr << "Failed to read model file : "<< filename << std::endl;
+    return EXIT_FAILURE;
+    }
+  return EXIT_SUCCESS;
+}
+
+int otbPCAModeTrain(int argc, char * argv [])
+{
+  if (argc < 3)
+    {
+    std::cerr << "Usage: " << argv[0] << " letter.scale  model.out" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  // Extract data from letter.scale
+  InputListSampleType::Pointer samples = InputListSampleType::New();
+  TargetListSampleType::Pointer target = TargetListSampleType::New();
+  if (!otb::ReadDataFile(argv[1], samples, target))
+    {
+    std::cout << "Failed to read samples file " << argv[1] << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  PCAModelType::Pointer model = PCAModelType::New();
+  model->SetDimension(14);
+  model->SetWriteEigenvectors(true);
+  model->SetInputListSample(samples);
+  model->Train();
+  model->Save(std::string(argv[2]));
+
+  return EXIT_SUCCESS;
+}
diff --git a/Modules/Learning/DimensionalityReductionLearning/test/otbSOMModelTest.cxx b/Modules/Learning/DimensionalityReductionLearning/test/otbSOMModelTest.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..092cac8404ced25b8a106e5ac5037bbaa06ced71
--- /dev/null
+++ b/Modules/Learning/DimensionalityReductionLearning/test/otbSOMModelTest.cxx
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "otbSOMModel.h"
+#include "otbReadDataFile.h"
+
+typedef otb::SOMModel<double,2> SOMModel2D;
+typedef otb::SOMModel<double,3> SOMModel3D;
+typedef otb::SOMModel<double,4> SOMModel4D;
+typedef otb::SOMModel<double,5> SOMModel5D;
+
+typedef SOMModel2D::InputListSampleType InputListSampleType;
+typedef SOMModel2D::TargetListSampleType TargetListSampleType;
+
+int otbSOMModelNew(int itkNotUsed(argc), char * itkNotUsed(argv) [])
+{
+  SOMModel2D::Pointer model2D = SOMModel2D::New();
+  SOMModel3D::Pointer model3D = SOMModel3D::New();
+  SOMModel4D::Pointer model4D = SOMModel4D::New();
+  SOMModel5D::Pointer model5D = SOMModel5D::New();
+
+  return EXIT_SUCCESS;
+}
+
+int otbSOMModelCanRead(int argc, char * argv [])
+{
+  if (argc < 5) // argv[1]..argv[4] are read below; argc < 2 would allow out-of-bounds reads
+    {
+    std::cerr << "Usage: " << argv[0] << " <model2D> <model3D> <model4D> <model5D>" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  std::string filename2D(argv[1]);
+  std::string filename3D(argv[2]);
+  std::string filename4D(argv[3]);
+  std::string filename5D(argv[4]);
+
+  SOMModel2D::Pointer model2D = SOMModel2D::New();
+  SOMModel3D::Pointer model3D = SOMModel3D::New();
+  SOMModel4D::Pointer model4D = SOMModel4D::New();
+  SOMModel5D::Pointer model5D = SOMModel5D::New();
+
+  if (! model2D->CanReadFile(filename2D) )
+    {
+    std::cerr << "Failed to read model file : "<< filename2D << std::endl;
+    return EXIT_FAILURE;
+    }
+  if (! model3D->CanReadFile(filename3D) )
+    {
+    std::cerr << "Failed to read model file : "<< filename3D << std::endl;
+    return EXIT_FAILURE;
+    }
+  if (! model4D->CanReadFile(filename4D) )
+    {
+    std::cerr << "Failed to read model file : "<< filename4D << std::endl;
+    return EXIT_FAILURE;
+    }
+  if (! model5D->CanReadFile(filename5D) )
+    {
+    std::cerr << "Failed to read model file : "<< filename5D << std::endl;
+    return EXIT_FAILURE;
+    }
+  return EXIT_SUCCESS;
+}
+
+int otbSOMModeTrain(int argc, char * argv [])
+{
+  if (argc < 6) // argv[1] input + argv[2]..argv[5] output models; argc < 3 would allow out-of-bounds reads
+    {
+    std::cerr << "Usage: " << argv[0] << " letter.scale  model2D.out model3D.out model4D.out model5D.out" << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  // Extract data from letter.scale
+  InputListSampleType::Pointer samples = InputListSampleType::New();
+  TargetListSampleType::Pointer target = TargetListSampleType::New();
+  if (!otb::ReadDataFile(argv[1], samples, target))
+    {
+    std::cout << "Failed to read samples file " << argv[1] << std::endl;
+    return EXIT_FAILURE;
+    }
+
+  SOMModel2D::Pointer model2D = SOMModel2D::New();
+  SOMModel3D::Pointer model3D = SOMModel3D::New();
+  SOMModel4D::Pointer model4D = SOMModel4D::New();
+  SOMModel5D::Pointer model5D = SOMModel5D::New();
+
+  SOMModel2D::SizeType size2D, radius2D;
+  size2D.Fill(10);
+  radius2D.Fill(3);
+  SOMModel3D::SizeType size3D, radius3D;
+  size3D.Fill(6);
+  radius3D.Fill(3);
+  SOMModel4D::SizeType size4D, radius4D;
+  size4D.Fill(4);
+  radius4D.Fill(2);
+  SOMModel5D::SizeType size5D, radius5D;
+  size5D.Fill(3);
+  radius5D.Fill(2);
+
+  std::cout << "Train 2D model..."<< std::endl;
+  model2D->SetNumberOfIterations(10);
+  model2D->SetBetaInit(1.0);
+  model2D->SetWriteMap(true);
+  model2D->SetBetaEnd(0.1);
+  model2D->SetMaxWeight(10.0);
+  model2D->SetMapSize(size2D);
+  model2D->SetNeighborhoodSizeInit(radius2D);
+  model2D->SetInputListSample(samples);
+  model2D->Train();
+  model2D->Save(std::string(argv[2]));
+
+  std::cout << "Train 3D model..."<< std::endl;
+  model3D->SetNumberOfIterations(10);
+  model3D->SetBetaInit(1.0);
+  model3D->SetWriteMap(true);
+  model3D->SetBetaEnd(0.1);
+  model3D->SetMaxWeight(10.0);
+  model3D->SetMapSize(size3D);
+  model3D->SetNeighborhoodSizeInit(radius3D);
+  model3D->SetInputListSample(samples);
+  model3D->Train();
+  model3D->Save(std::string(argv[3]));
+
+  std::cout << "Train 4D model..."<< std::endl;
+  model4D->SetNumberOfIterations(10);
+  model4D->SetBetaInit(1.0);
+  model4D->SetWriteMap(true);
+  model4D->SetBetaEnd(0.1);
+  model4D->SetMaxWeight(10.0);
+  model4D->SetMapSize(size4D);
+  model4D->SetNeighborhoodSizeInit(radius4D);
+  model4D->SetInputListSample(samples);
+  model4D->Train();
+  model4D->Save(std::string(argv[4]));
+
+  std::cout << "Train 5D model..."<< std::endl;
+  model5D->SetNumberOfIterations(10);
+  model5D->SetBetaInit(1.0);
+  model5D->SetWriteMap(true);
+  model5D->SetBetaEnd(0.1);
+  model5D->SetMaxWeight(10.0);
+  model5D->SetMapSize(size5D);
+  model5D->SetNeighborhoodSizeInit(radius5D);
+  model5D->SetInputListSample(samples);
+  model5D->Train();
+  model5D->Save(std::string(argv[5]));
+
+  return EXIT_SUCCESS;
+}
+
diff --git a/Modules/Learning/LearningBase/CMakeLists.txt b/Modules/Learning/LearningBase/CMakeLists.txt
index b3460da963ae8c229aee0b34e3120fb27d27d6ec..45b573a3dc2006a8f638f7b81a80880adebc334a 100644
--- a/Modules/Learning/LearningBase/CMakeLists.txt
+++ b/Modules/Learning/LearningBase/CMakeLists.txt
@@ -20,4 +20,6 @@
 
 project(OTBLearningBase)
 
+set(OTBLearningBase_LIBRARIES OTBLearningBase)
+
 otb_module_impl()
diff --git a/Modules/Learning/LearningBase/include/otbMachineLearningModel.h b/Modules/Learning/LearningBase/include/otbMachineLearningModel.h
index 552880cb9a83a4e43b7e29ae75689613633e5ef0..2bfd9177ff00feda39a6c251dc62cb7e75706733 100644
--- a/Modules/Learning/LearningBase/include/otbMachineLearningModel.h
+++ b/Modules/Learning/LearningBase/include/otbMachineLearningModel.h
@@ -116,7 +116,11 @@ public:
      */
   TargetSampleType Predict(const InputSampleType& input, ConfidenceValueType *quality = ITK_NULLPTR) const;
 
-
+  /**\name Set and get the dimension of the model for dimensionality reduction models */
+  //@{
+  itkSetMacro(Dimension,unsigned int);
+  itkGetMacro(Dimension,unsigned int);
+  //@}
 
   /** Predict a batch of samples (InputListSampleType)
     * \param input The batch of sample to predict
@@ -128,29 +132,29 @@ public:
      */
   typename TargetListSampleType::Pointer PredictBatch(const InputListSampleType * input, ConfidenceListSampleType * quality = ITK_NULLPTR) const;
   
-  /**\name Classification model file manipulation */
-  //@{
-  /** Save the model to file */
+  /**\name Classification model file manipulation */
+  //@{
+  /** Save the model to file */
   virtual void Save(const std::string & filename, const std::string & name="") = 0;
 
-  /** Load the model from file */
+  /** Load the model from file */
   virtual void Load(const std::string & filename, const std::string & name="") = 0;
-  //@}
+  //@}
 
-  /**\name Classification model file compatibility tests */
-  //@{
-  /** Is the input model file readable and compatible with the corresponding classifier ? */
+  /**\name Classification model file compatibility tests */
+  //@{
+  /** Is the input model file readable and compatible with the corresponding classifier ? */
   virtual bool CanReadFile(const std::string &) = 0;
 
-  /** Is the input model file writable and compatible with the corresponding classifier ? */
+  /** Is the input model file writable and compatible with the corresponding classifier ? */
   virtual bool CanWriteFile(const std::string &)  = 0;
-  //@}
+  //@}
 
-  /** Query capacity to produce a confidence index */
+  /** Query capacity to produce a confidence index */
   bool HasConfidenceIndex() const {return m_ConfidenceIndex;}
 
-  /**\name Input list of samples accessors */
-  //@{
+  /**\name Input list of samples accessors */
+  //@{
   itkSetObjectMacro(InputListSample,InputListSampleType);
   itkGetObjectMacro(InputListSample,InputListSampleType);
   itkGetConstObjectMacro(InputListSample,InputListSampleType);
@@ -185,6 +189,9 @@ protected:
   /** Input list sample */
   typename InputListSampleType::Pointer m_InputListSample;
 
+  /** Validation list sample if provided for some models */
+  typename InputListSampleType::Pointer m_ValidationListSample;
+
   /** Target list sample */
   typename TargetListSampleType::Pointer m_TargetListSample;
 
@@ -192,7 +199,7 @@ protected:
   
   /** flag to choose between classification and regression modes */
   bool m_RegressionMode;
-  
+    
   /** flag that indicates if the model supports regression, child
    *  classes should modify it in their constructor if they support
    *  regression mode */
@@ -203,6 +210,9 @@ protected:
 
   /** Is DoPredictBatch multi-threaded ? */
   bool m_IsDoPredictBatchMultiThreaded;
+  
+  /** Output Dimension of the model, used by Dimensionality Reduction models*/
+  unsigned int m_Dimension;
 
 private:
   /**  Actual implementation of BatchPredicition
diff --git a/Modules/Learning/LearningBase/include/otbMachineLearningModel.txx b/Modules/Learning/LearningBase/include/otbMachineLearningModel.txx
index 00da0413879e83f013cf98053413e81dbcac0193..a983cb2fa0aaa6f6b0a7d7747e0ee1e0ac7636a5 100644
--- a/Modules/Learning/LearningBase/include/otbMachineLearningModel.txx
+++ b/Modules/Learning/LearningBase/include/otbMachineLearningModel.txx
@@ -38,7 +38,8 @@ MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
   m_RegressionMode(false),
   m_IsRegressionSupported(false),
   m_ConfidenceIndex(false),
-  m_IsDoPredictBatchMultiThreaded(false)
+  m_IsDoPredictBatchMultiThreaded(false),
+  m_Dimension(0)
 {}
 
 
@@ -98,11 +99,11 @@ MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
   else
     {
     
-    #ifdef _OPENMP
+#ifdef _OPENMP
     // OpenMP threading here
     unsigned int nb_threads(0), threadId(0), nb_batches(0);
     
-    #pragma omp parallel shared(nb_threads,nb_batches) private(threadId)
+#pragma omp parallel shared(nb_threads,nb_batches) private(threadId)
     {
     // Get number of threads configured with ITK
     omp_set_num_threads(itk::MultiThreader::GetGlobalDefaultNumberOfThreads());
@@ -122,9 +123,9 @@ MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
       this->DoPredictBatch(input,batch_start,batch_size,targets,quality);
       }
     }
-    #else
+#else
     this->DoPredictBatch(input,0,input->Size(),targets,quality);
-    #endif
+#endif
     return targets;
     }
 }
@@ -169,12 +170,12 @@ MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
 
 template <class TInputValue, class TOutputValue, class TConfidenceValue>
 void
-MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
-::PrintSelf(std::ostream& os, itk::Indent indent) const
-{
-  // Call superclass implementation
-  Superclass::PrintSelf(os,indent);
-}
-}
+MachineLearningModel<TInputValue,TOutputValue,TConfidenceValue>
+::PrintSelf(std::ostream& os, itk::Indent indent) const
+{
+  // Call superclass implementation
+  Superclass::PrintSelf(os,indent);
+}
+}
 
 #endif
diff --git a/Modules/Learning/LearningBase/include/otbMachineLearningModelFactoryBase.h b/Modules/Learning/LearningBase/include/otbMachineLearningModelFactoryBase.h
index 0ca61f2c37483d0735c146f9355b4a4a8d4c5d35..012e0f1d77935bf1fffaed129bebcf8a8492368f 100644
--- a/Modules/Learning/LearningBase/include/otbMachineLearningModelFactoryBase.h
+++ b/Modules/Learning/LearningBase/include/otbMachineLearningModelFactoryBase.h
@@ -22,7 +22,7 @@
 #define otbMachineLearningModelFactoryBase_h
 
 #include "itkMutexLock.h"
-#include "OTBSupervisedExport.h"
+#include "OTBLearningBaseExport.h"
 
 namespace otb
 {
@@ -34,7 +34,7 @@ namespace otb
  *
  * \ingroup OTBLearningBase
  */
-class OTBSupervised_EXPORT MachineLearningModelFactoryBase : public itk::Object
+class OTBLearningBase_EXPORT MachineLearningModelFactoryBase : public itk::Object
 {
 public:
   /** Standard class typedefs. */
diff --git a/Modules/Learning/LearningBase/otb-module.cmake b/Modules/Learning/LearningBase/otb-module.cmake
index b4fab23bbaec200852575ff64dbb2424475542c6..afa2a339a1813cf16e5f6ea3700f079a36180dcd 100644
--- a/Modules/Learning/LearningBase/otb-module.cmake
+++ b/Modules/Learning/LearningBase/otb-module.cmake
@@ -22,19 +22,15 @@ set(DOCUMENTATION "This module contains OTB generic Machine Learning framework
 mainly based on OpenCV.")
 
 otb_module(OTBLearningBase
+  ENABLE_SHARED
   DEPENDS
     OTBCommon
-    OTBITK
-    OTBImageIO
     OTBImageBase
-
-  OPTIONAL_DEPENDS
-    OTBShark
+    OTBITK
 
   TEST_DEPENDS
     OTBTestKernel
     OTBImageIO
-    OTBImageBase
 
   DESCRIPTION
     "${DOCUMENTATION}"
diff --git a/Modules/Learning/LearningBase/src/CMakeLists.txt b/Modules/Learning/LearningBase/src/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4a7ef1c0d25c764f375e5b6fc3652efd2c430963
--- /dev/null
+++ b/Modules/Learning/LearningBase/src/CMakeLists.txt
@@ -0,0 +1,32 @@
+#
+# Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+#
+# This file is part of Orfeo Toolbox
+#
+#     https://www.orfeo-toolbox.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set(OTBLearningBase_SRC
+  otbMachineLearningModelFactoryBase.cxx
+  )
+
+add_library(OTBLearningBase ${OTBLearningBase_SRC})
+target_link_libraries(OTBLearningBase
+  ${OTBCommon_LIBRARIES}
+  ${OTBImageBase_LIBRARIES}
+  ${OTBITK_LIBRARIES}
+  )
+
+otb_module_target(OTBLearningBase)
diff --git a/Modules/Learning/Supervised/src/otbMachineLearningModelFactoryBase.cxx b/Modules/Learning/LearningBase/src/otbMachineLearningModelFactoryBase.cxx
similarity index 100%
rename from Modules/Learning/Supervised/src/otbMachineLearningModelFactoryBase.cxx
rename to Modules/Learning/LearningBase/src/otbMachineLearningModelFactoryBase.cxx
diff --git a/Modules/Learning/Supervised/src/CMakeLists.txt b/Modules/Learning/Supervised/src/CMakeLists.txt
index 7a3c77b3a7e2992216d16f444178abb697424dc0..45e881904bbbf61bec7bb8a158f8661806d64896 100644
--- a/Modules/Learning/Supervised/src/CMakeLists.txt
+++ b/Modules/Learning/Supervised/src/CMakeLists.txt
@@ -19,7 +19,6 @@
 #
 
 set(OTBSupervised_SRC
-  otbMachineLearningModelFactoryBase.cxx
   otbExhaustiveExponentialOptimizer.cxx
   )
 
@@ -33,6 +32,7 @@ target_link_libraries(OTBSupervised
   ${OTBLibSVM_LIBRARIES}
   ${OTBOpenCV_LIBRARIES}
   ${OTBShark_LIBRARIES}
+  ${OTBLearningBase_LIBRARIES}
   )
 
 otb_module_target(OTBSupervised)
diff --git a/Modules/Learning/Supervised/test/otbTrainMachineLearningModel.cxx b/Modules/Learning/Supervised/test/otbTrainMachineLearningModel.cxx
index 0633cd0f53a7acbf991cf2d727b1e766c9a753a3..dd34b41b8b3a4f9711530f99892f5780515e2566 100644
--- a/Modules/Learning/Supervised/test/otbTrainMachineLearningModel.cxx
+++ b/Modules/Learning/Supervised/test/otbTrainMachineLearningModel.cxx
@@ -26,6 +26,8 @@
 #include <otbMachineLearningModel.h>
 #include "otbConfusionMatrixCalculator.h"
 
+#include "otbReadDataFile.h"
+
 #include "otb_boost_string_header.h"
 
 typedef otb::MachineLearningModel<float,short>         MachineLearningModelType;
@@ -46,142 +48,6 @@ typedef MachineLearningModelRegressionType::TargetListSampleType TargetListSampl
 
 typedef otb::ConfusionMatrixCalculator<TargetListSampleType, TargetListSampleType> ConfusionMatrixCalculatorType;
 
-bool ReadDataFile(const std::string & infname, InputListSampleType * samples, TargetListSampleType * labels)
-{
-  std::ifstream ifs;
-  ifs.open(infname.c_str());
-
-  if(!ifs)
-    {
-    std::cout<<"Could not read file "<<infname<<std::endl;
-    return false;
-    }
-
-  unsigned int nbfeatures = 0;
-
-  while (!ifs.eof())
-    {
-    std::string line;
-    std::getline(ifs, line);
-    boost::algorithm::trim(line);
-
-    if(nbfeatures == 0)
-      {
-      nbfeatures = std::count(line.begin(),line.end(),' ');
-      }
-
-    if(line.size()>1)
-      {
-      InputSampleType sample(nbfeatures);
-      sample.Fill(0);
-
-      std::string::size_type pos = line.find_first_of(" ", 0);
-
-      // Parse label
-      TargetSampleType label;
-      label[0] = atoi(line.substr(0, pos).c_str());
-
-      bool endOfLine = false;
-      unsigned int id = 0;
-
-      while(!endOfLine)
-        {
-        std::string::size_type nextpos = line.find_first_of(" ", pos+1);
-
-        if(pos == std::string::npos)
-          {
-          endOfLine = true;
-          nextpos = line.size()-1;
-          }
-        else
-          {
-          std::string feature = line.substr(pos,nextpos-pos);
-          std::string::size_type semicolonpos = feature.find_first_of(":");
-          id = atoi(feature.substr(0,semicolonpos).c_str());
-          sample[id - 1] = atof(feature.substr(semicolonpos+1,feature.size()-semicolonpos).c_str());
-          pos = nextpos;
-          }
-
-        }
-      samples->SetMeasurementVectorSize(itk::NumericTraits<InputSampleType>::GetLength(sample));
-      samples->PushBack(sample);
-      labels->PushBack(label);
-      }
-    }
-
-  //std::cout<<"Retrieved "<<samples->Size()<<" samples"<<std::endl;
-  ifs.close();
-  return true;
-}
-
-bool ReadDataRegressionFile(const std::string & infname, InputListSampleRegressionType * samples, TargetListSampleRegressionType * labels)
-{
-  std::ifstream ifs;
-  ifs.open(infname.c_str());
-
-  if(!ifs)
-    {
-    std::cout<<"Could not read file "<<infname<<std::endl;
-    return false;
-    }
-
-  unsigned int nbfeatures = 0;
-
-  while (!ifs.eof())
-    {
-    std::string line;
-    std::getline(ifs, line);
-
-    if(nbfeatures == 0)
-      {
-      nbfeatures = std::count(line.begin(),line.end(),' ')-1;
-      //std::cout<<"Found "<<nbfeatures<<" features per samples"<<std::endl;
-      }
-
-    if(line.size()>1)
-      {
-      InputSampleRegressionType sample(nbfeatures);
-      sample.Fill(0);
-
-      std::string::size_type pos = line.find_first_of(" ", 0);
-
-      // Parse label
-      TargetSampleRegressionType label;
-      label[0] = atof(line.substr(0, pos).c_str());
-
-      bool endOfLine = false;
-      unsigned int id = 0;
-
-      while(!endOfLine)
-        {
-        std::string::size_type nextpos = line.find_first_of(" ", pos+1);
-
-        if(nextpos == std::string::npos)
-          {
-          endOfLine = true;
-          nextpos = line.size()-1;
-          }
-        else
-          {
-          std::string feature = line.substr(pos,nextpos-pos);
-          std::string::size_type semicolonpos = feature.find_first_of(":");
-          id = atoi(feature.substr(0,semicolonpos).c_str());
-          sample[id - 1] = atof(feature.substr(semicolonpos+1,feature.size()-semicolonpos).c_str());
-          pos = nextpos;
-          }
-
-        }
-      samples->SetMeasurementVectorSize(itk::NumericTraits<InputSampleRegressionType>::GetLength(sample));
-      samples->PushBack(sample);
-      labels->PushBack(label);
-      }
-    }
-
-  //std::cout<<"Retrieved "<<samples->Size()<<" samples"<<std::endl;
-  ifs.close();
-  return true;
-}
-
 #ifdef OTB_USE_LIBSVM
 #include "otbLibSVMMachineLearningModel.h"
 int otbLibSVMMachineLearningModelNew(int itkNotUsed(argc), char * itkNotUsed(argv) [])
@@ -205,7 +71,7 @@ int otbLibSVMMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if (!ReadDataFile(argv[1], samples, labels))
+  if (!otb::ReadDataFile(argv[1], samples, labels))
     {
     std::cout << "Failed to read samples file " << argv[1] << std::endl;
     return EXIT_FAILURE;
@@ -294,7 +160,7 @@ int otbSVMMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -364,7 +230,7 @@ int otbSVMMachineLearningRegressionModel(int argc, char * argv[])
   InputListSampleRegressionType::Pointer samples = InputListSampleRegressionType::New();
   TargetListSampleRegressionType::Pointer labels = TargetListSampleRegressionType::New();
 
-  if(!ReadDataRegressionFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -439,7 +305,7 @@ int otbKNearestNeighborsMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -516,7 +382,7 @@ int otbRandomForestsMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -603,7 +469,7 @@ int otbBoostMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -689,7 +555,7 @@ int otbANNMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if (!ReadDataFile(argv[1], samples, labels))
+  if (!otb::ReadDataFile(argv[1], samples, labels))
     {
     std::cout << "Failed to read samples file " << argv[1] << std::endl;
     return EXIT_FAILURE;
@@ -779,7 +645,7 @@ int otbNormalBayesMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -856,7 +722,7 @@ int otbDecisionTreeMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -934,7 +800,7 @@ int otbGradientBoostedTreeMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!ReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
@@ -995,139 +861,6 @@ int otbGradientBoostedTreeMachineLearningModel(int argc, char * argv[])
 #ifdef OTB_USE_SHARK
 #include <chrono> // If shark is on, then we are using c++11
 
-bool SharkReadDataFile(const std::string & infname, InputListSampleType * samples, TargetListSampleType * labels)
-{
-  std::ifstream ifs(infname.c_str());
-
-  if(!ifs)
-    {
-    std::cout<<"Could not read file "<<infname<<std::endl;
-    return false;
-    }
-
-  unsigned int nbfeatures = 0;
-
-  std::string line;
-  while (std::getline(ifs, line))
-    {
-    boost::algorithm::trim(line);
-
-    if(nbfeatures == 0)
-      {
-      nbfeatures = std::count(line.begin(),line.end(),' ');
-      }
-
-    if(line.size()>1)
-      {
-      InputSampleType sample(nbfeatures);
-      sample.Fill(0);
-
-      std::string::size_type pos = line.find_first_of(" ", 0);
-
-      // Parse label
-      TargetSampleType label;
-      label[0] = std::stoi(line.substr(0, pos).c_str());
-
-      bool endOfLine = false;
-      unsigned int id = 0;
-
-      while(!endOfLine)
-        {
-        std::string::size_type nextpos = line.find_first_of(" ", pos+1);
-
-        if(pos == std::string::npos)
-          {
-          endOfLine = true;
-          nextpos = line.size()-1;
-          }
-        else
-          {
-          std::string feature = line.substr(pos,nextpos-pos);
-          std::string::size_type semicolonpos = feature.find_first_of(":");
-          id = std::stoi(feature.substr(0,semicolonpos).c_str());
-          sample[id - 1] = atof(feature.substr(semicolonpos+1,feature.size()-semicolonpos).c_str());
-          pos = nextpos;
-          }
-
-        }
-      samples->SetMeasurementVectorSize(itk::NumericTraits<InputSampleType>::GetLength(sample));
-      samples->PushBack(sample);
-      labels->PushBack(label);
-      }
-    }
-
-  //std::cout<<"Retrieved "<<samples->Size()<<" samples"<<std::endl;
-  ifs.close();
-  return true;
-}
-
-bool SharkReadDataRegressionFile(const std::string & infname, InputListSampleRegressionType * samples, TargetListSampleRegressionType * labels)
-{
-  std::ifstream ifs(infname.c_str());
-  if(!ifs)
-    {
-    std::cout<<"Could not read file "<<infname<<std::endl;
-    return false;
-    }
-
-  unsigned int nbfeatures = 0;
-
-  while (!ifs.eof())
-    {
-    std::string line;
-    std::getline(ifs, line);
-
-    if(nbfeatures == 0)
-      {
-      nbfeatures = std::count(line.begin(),line.end(),' ')-1;
-      //std::cout<<"Found "<<nbfeatures<<" features per samples"<<std::endl;
-      }
-
-    if(line.size()>1)
-      {
-      InputSampleRegressionType sample(nbfeatures);
-      sample.Fill(0);
-
-      std::string::size_type pos = line.find_first_of(" ", 0);
-
-      // Parse label
-      TargetSampleRegressionType label;
-      label[0] = atof(line.substr(0, pos).c_str());
-
-      bool endOfLine = false;
-      unsigned int id = 0;
-
-      while(!endOfLine)
-        {
-        std::string::size_type nextpos = line.find_first_of(" ", pos+1);
-
-        if(nextpos == std::string::npos)
-          {
-          endOfLine = true;
-          nextpos = line.size()-1;
-          }
-        else
-          {
-          std::string feature = line.substr(pos,nextpos-pos);
-          std::string::size_type semicolonpos = feature.find_first_of(":");
-          id = std::stoi(feature.substr(0,semicolonpos).c_str());
-          sample[id - 1] = atof(feature.substr(semicolonpos+1,feature.size()-semicolonpos).c_str());
-          pos = nextpos;
-          }
-
-        }
-      samples->SetMeasurementVectorSize(itk::NumericTraits<InputSampleRegressionType>::GetLength(sample));
-      samples->PushBack(sample);
-      labels->PushBack(label);
-      }
-    }
-
-  //std::cout<<"Retrieved "<<samples->Size()<<" samples"<<std::endl;
-  ifs.close();
-  return true;
-}
-
-
 #include "otbSharkRandomForestsMachineLearningModel.h"
 int otbSharkRFMachineLearningModelNew(int itkNotUsed(argc), char * itkNotUsed(argv) [])
 {
@@ -1149,7 +882,7 @@ int otbSharkRFMachineLearningModel(int argc, char * argv[])
   InputListSampleType::Pointer samples = InputListSampleType::New();
   TargetListSampleType::Pointer labels = TargetListSampleType::New();
 
-  if(!SharkReadDataFile(argv[1],samples,labels))
+  if(!otb::ReadDataFile(argv[1],samples,labels))
     {
     std::cout<<"Failed to read samples file "<<argv[1]<<std::endl;
     return EXIT_FAILURE;
diff --git a/Modules/Learning/Supervised/test/tests-libsvm.cmake b/Modules/Learning/Supervised/test/tests-libsvm.cmake
index bbf6647ab09866b3074d840a84e301f9956bc48f..03b5118cd9ffb9eb538df0efe8452ccd0fff30cc 100644
--- a/Modules/Learning/Supervised/test/tests-libsvm.cmake
+++ b/Modules/Learning/Supervised/test/tests-libsvm.cmake
@@ -20,7 +20,7 @@
 
 otb_add_test(NAME leTvLibSVMMachineLearningModel COMMAND otbSupervisedTestDriver
   otbLibSVMMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/libsvm_model.txt
   )
 otb_add_test(NAME leTuLibSVMMachineLearningModelNew COMMAND otbSupervisedTestDriver
diff --git a/Modules/Learning/Supervised/test/tests-opencv.cmake b/Modules/Learning/Supervised/test/tests-opencv.cmake
index a309c341dc2b06f83d53b4a4aeab2305b9cf68a1..f898971f56ffc964adf8f1a352a3628fc7ba6990 100644
--- a/Modules/Learning/Supervised/test/tests-opencv.cmake
+++ b/Modules/Learning/Supervised/test/tests-opencv.cmake
@@ -26,7 +26,7 @@ otb_add_test(NAME leTuRandomForestsMachineLearningModelNew COMMAND otbSupervised
 
 otb_add_test(NAME leTvANNMachineLearningModel COMMAND otbSupervisedTestDriver
   otbANNMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/ann_model.txt
   )
 
@@ -74,7 +74,7 @@ otb_add_test(NAME leTvSVMMachineLearningRegressionModel COMMAND otbSupervisedTes
 
 otb_add_test(NAME leTvSVMMachineLearningModel COMMAND otbSupervisedTestDriver
   otbSVMMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/svm_model.txt
   )
 
@@ -83,14 +83,14 @@ otb_add_test(NAME leTuBoostMachineLearningModelNew COMMAND otbSupervisedTestDriv
 
 otb_add_test(NAME leTvNormalBayesMachineLearningModel COMMAND otbSupervisedTestDriver
   otbNormalBayesMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/normalbayes_model.txt
   )
 
 if(NOT OTB_OPENCV_3)
 otb_add_test(NAME leTvGradientBoostedTreeMachineLearningModel COMMAND otbSupervisedTestDriver
   otbGradientBoostedTreeMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/gbt_model.txt
   )
 otb_add_test(NAME leTuGradientBoostedTreeMachineLearningModelNew COMMAND otbSupervisedTestDriver
@@ -99,7 +99,7 @@ endif()
 
 otb_add_test(NAME leTvRandomForestsMachineLearningModel COMMAND otbSupervisedTestDriver
   otbRandomForestsMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/rf_model.txt
   )
 
@@ -108,19 +108,19 @@ otb_add_test(NAME leTuANNMachineLearningModelNew COMMAND otbSupervisedTestDriver
 
 otb_add_test(NAME leTvKNearestNeighborsMachineLearningModel COMMAND otbSupervisedTestDriver
   otbKNearestNeighborsMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/knn_model.txt
   )
 
 otb_add_test(NAME leTvDecisionTreeMachineLearningModel COMMAND otbSupervisedTestDriver
   otbDecisionTreeMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/decisiontree_model.txt
   )
 
 otb_add_test(NAME leTvBoostMachineLearningModel COMMAND otbSupervisedTestDriver
   otbBoostMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/boost_model.txt
   )
 
diff --git a/Modules/Learning/Supervised/test/tests-shark.cmake b/Modules/Learning/Supervised/test/tests-shark.cmake
index eeda8fff3f9f1413dcffe868c3fb5f8cb4f8f6d0..546e826043b31d284840fc3bed69250b82ad6dfb 100644
--- a/Modules/Learning/Supervised/test/tests-shark.cmake
+++ b/Modules/Learning/Supervised/test/tests-shark.cmake
@@ -23,7 +23,7 @@ otb_add_test(NAME leTuSharkRFMachineLearningModelNew COMMAND otbSupervisedTestDr
 
 otb_add_test(NAME leTvSharkRFMachineLearningModel COMMAND otbSupervisedTestDriver
   otbSharkRFMachineLearningModel
-  ${INPUTDATA}/letter.scale
+  ${INPUTDATA}/letter_light.scale
   ${TEMP}/shark_rf_model.txt
   )
 
diff --git a/Modules/Learning/LearningBase/include/otbSharkUtils.h b/Modules/ThirdParty/Shark/include/otbSharkUtils.h
similarity index 87%
rename from Modules/Learning/LearningBase/include/otbSharkUtils.h
rename to Modules/ThirdParty/Shark/include/otbSharkUtils.h
index 9efcf948bdbbfd7e9a672068677f029ac10c7d39..de3adf77401d0f131d2bd7d447627829b3df64ff 100644
--- a/Modules/Learning/LearningBase/include/otbSharkUtils.h
+++ b/Modules/ThirdParty/Shark/include/otbSharkUtils.h
@@ -21,7 +21,8 @@
 #ifndef otbSharkUtils_h
 #define otbSharkUtils_h
 
-#include "itkMacro.h"
+#include <stdexcept>
+#include <string>
 
 #if defined(__GNUC__) || defined(__clang__)
 #pragma GCC diagnostic push
@@ -41,11 +42,13 @@ namespace Shark
 {
 template <class T> void ListSampleRangeToSharkVector(const T * listSample, std::vector<shark::RealVector> & output, unsigned int start, unsigned int size)
 {
-  assert(listSample != ITK_NULLPTR);
+  assert(listSample != nullptr);
 
   if(start+size>listSample->Size())
     {
-    itkGenericExceptionMacro(<<"Requested range ["<<start<<", "<<start+size<<"[ is out of bound for input list sample (range [0, "<<listSample->Size()<<"[");
+    std::out_of_range e_(std::string("otb::Shark::ListSampleRangeToSharkVector "
+      ": Requested range is out of list sample bounds"));
+    throw e_;
     }
   
   output.clear();
@@ -83,11 +86,13 @@ template <class T> void ListSampleRangeToSharkVector(const T * listSample, std::
 
 template <class T> void ListSampleRangeToSharkVector(const T * listSample, std::vector<unsigned int> & output, unsigned int start, unsigned int size)
 {
-  assert(listSample != ITK_NULLPTR);
+  assert(listSample != nullptr);
 
   if(start+size>listSample->Size())
     {
-    itkGenericExceptionMacro(<<"Requested range ["<<start<<", "<<start+size<<"[ is out of bound for input list sample (range [0, "<<listSample->Size()<<"[");
+    std::out_of_range e_(std::string("otb::Shark::ListSampleRangeToSharkVector "
+      ": Requested range is out of list sample bounds"));
+    throw e_;
     }
 
   output.clear();
@@ -113,13 +118,13 @@ template <class T> void ListSampleRangeToSharkVector(const T * listSample, std::
 
 template <class T> void ListSampleToSharkVector(const T * listSample, std::vector<shark::RealVector> & output)
 {
-  assert(listSample != ITK_NULLPTR);
+  assert(listSample != nullptr);
   ListSampleRangeToSharkVector(listSample,output,0U,static_cast<unsigned int>(listSample->Size()));
 }
 
 template <class T> void ListSampleToSharkVector(const T * listSample, std::vector<unsigned int> & output)
 {
-  assert(listSample != ITK_NULLPTR);
+  assert(listSample != nullptr);
   ListSampleRangeToSharkVector(listSample,output,0, static_cast<unsigned int>(listSample->Size()));
 }
   
diff --git a/Modules/ThirdParty/Shark/otb-module-init.cmake b/Modules/ThirdParty/Shark/otb-module-init.cmake
index 6bdd8c6a31559d89eb7b56d8bb9b0295ed0c7c26..23ec6090c7031f09ffe821918d14e58de5fc764e 100644
--- a/Modules/ThirdParty/Shark/otb-module-init.cmake
+++ b/Modules/ThirdParty/Shark/otb-module-init.cmake
@@ -20,4 +20,8 @@
 
 find_package ( Shark REQUIRED )
 
+if(SHARK_USE_OPENMP AND NOT OTB_USE_OPENMP)
+  message(WARNING "Shark library is built with OpenMP and you have OTB_USE_OPENMP set to OFF.")
+endif()
+
 mark_as_advanced( Shark_DIR )