diff --git a/CMake/OTBModuleMacros.cmake b/CMake/OTBModuleMacros.cmake
index 0938a0f2fb9ed50fe44edf7017d9a4ac325e41b7..afb9a29f5c45b2509d0cd0bfe350df5a59ea846f 100644
--- a/CMake/OTBModuleMacros.cmake
+++ b/CMake/OTBModuleMacros.cmake
@@ -277,6 +277,12 @@ macro(otb_module_test)
   foreach(dep IN LISTS OTB_MODULE_${otb-module-test}_DEPENDS)
     list(APPEND ${otb-module-test}_LIBRARIES "${${dep}_LIBRARIES}")
   endforeach()
+  # Make sure the module's tests can also link against its enabled optional dependencies
+  foreach(dep IN LISTS OTB_MODULE_${otb-module}_OPTIONAL_DEPENDS)
+    if (${dep}_ENABLED)
+      list(APPEND ${otb-module-test}_LIBRARIES "${${dep}_LIBRARIES}")
+    endif()
+  endforeach()
 endmacro()
 
 macro(otb_module_warnings_disable)
diff --git a/Modules/Applications/AppClassification/app/otbKMeansClassification.cxx b/Modules/Applications/AppClassification/app/otbKMeansClassification.cxx
index 52e0d6b426d7defb6343113029c17abe6dcacd15..ee04d9cd0fb2ff3035ca2208a8cd64c735cd93d8 100644
--- a/Modules/Applications/AppClassification/app/otbKMeansClassification.cxx
+++ b/Modules/Applications/AppClassification/app/otbKMeansClassification.cxx
@@ -291,12 +291,15 @@ protected:
         itkExceptionMacro(<< "File : " << modelFileName << " couldn't be opened");
       }
 
-      // get the end line with the centroids
+      // get the line with the centroids (starts with "2 ")
       std::string line, centroidLine;
       while(std::getline(infile,line))
       {
-        if (!line.empty())
+        if (line.size() > 2 && line[0] == '2' && line[1] == ' ')
+          {
           centroidLine = line;
+          break;
+          }
       }
 
       std::vector<std::string> centroidElm;
diff --git a/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx b/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx
index 1df2c463fc26c531095154515039a9245d0ba5c1..d9dd8e8816fec2cce2502a81b624a59ce33817ab 100644
--- a/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx
+++ b/Modules/Applications/AppDimensionalityReduction/app/otbImageDimensionalityReduction.cxx
@@ -124,12 +124,12 @@ protected:
 private:
   void DoInit() override
   {
-    SetName("DimensionalityReduction");
+    SetName("ImageDimensionalityReduction");
     SetDescription("Performs dimensionality reduction of the input image "
       "according to a dimensionality reduction model file.");
 
     // Documentation
-    SetDocName("DimensionalityReduction");
+    SetDocName("Image Dimensionality Reduction");
     SetDocLongDescription("This application reduces the dimension of an input"
                           " image, based on a machine learning model file produced by"
                           " the TrainDimensionalityReduction application. Pixels of the "
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
index 7bf3231a9b8d372473fd5ed0f598d3f2fd7c7c06..5fe7ec2f27c9f91f21a2ce2417502f5d4beac54d 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.h
@@ -33,8 +33,9 @@
 #endif
 #include "otb_shark.h"
 #include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
-#include <shark/Models/FFNet.h>
-#include <shark/Models/Autoencoder.h>
+#include <shark/Models/LinearModel.h>
+#include <shark/Models/ConcatenatedModel.h>
+#include <shark/Models/NeuronLayers.h>
 #if defined(__GNUC__) || defined(__clang__)
 #pragma GCC diagnostic pop
 #endif
@@ -76,9 +77,9 @@ public:
   typedef typename Superclass::ConfidenceListSampleType         ConfidenceListSampleType;
 
   /// Neural network related typedefs
-  typedef shark::Autoencoder<NeuronType,shark::LinearNeuron> OutAutoencoderType;
-  typedef shark::Autoencoder<NeuronType,NeuronType> AutoencoderType;
-  typedef shark::FFNet<NeuronType,shark::LinearNeuron> NetworkType;
+  typedef shark::ConcatenatedModel<shark::RealVector> ModelType;
+  typedef shark::LinearModel<shark::RealVector,NeuronType> LayerType;
+  typedef shark::LinearModel<shark::RealVector, shark::LinearNeuron> OutLayerType;
 
   itkNewMacro(Self);
   itkTypeMacro(AutoencoderModel, DimensionalityReductionModel);
@@ -127,18 +128,16 @@ public:
 
   void Train() override;
 
-  template <class T, class Autoencoder>
+  template <class T>
   void TrainOneLayer(
     shark::AbstractStoppingCriterion<T> & criterion,
-    Autoencoder &,
     unsigned int,
     shark::Data<shark::RealVector> &,
     std::ostream&);
 
-  template <class T, class Autoencoder>
+  template <class T>
   void TrainOneSparseLayer(
     shark::AbstractStoppingCriterion<T> & criterion,
-    Autoencoder &,
     unsigned int,
     shark::Data<shark::RealVector> &,
     std::ostream&);
@@ -166,7 +165,9 @@ protected:
 
 private:
   /** Internal Network */
-  NetworkType m_Net;
+  ModelType m_Encoder;
+  std::vector<LayerType> m_InLayers;
+  OutLayerType m_OutLayer;
   itk::Array<unsigned int> m_NumberOfHiddenNeurons;
   /** Training parameters */
   unsigned int m_NumberOfIterations; // stop the training after a fixed number of iterations
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
index 33f1c28e247c43f80ac28a1d608b1c15967c6a5e..e5a26e9ee3dc8cbf4918222f4b7b45bc93e925cb 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbAutoencoderModel.txx
@@ -34,18 +34,17 @@
 #include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
-#include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
+//~ #include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
 
 #include <shark/Algorithms/GradientDescent/Rprop.h>// the RProp optimization algorithm
 #include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
 #include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
-#include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
-#include <shark/Models/ConcatenatedModel.h>//to concatenate the noise with the model
+//~ #include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
 
 #include <shark/Algorithms/StoppingCriteria/MaxIterations.h> //A simple stopping criterion that stops after a fixed number of iterations
 #include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
 
-#include <shark/Algorithms/GradientDescent/SteepestDescent.h>
+#include <shark/Algorithms/GradientDescent/Adam.h>
 #if defined(__GNUC__) || defined(__clang__)
 #pragma GCC diagnostic pop
 #endif
@@ -83,96 +82,56 @@ AutoencoderModel<TInputValue,NeuronType>
     }
 
   // Initialization of the feed forward neural network
-  std::vector<size_t> layers;
-  layers.push_back(shark::dataDimension(inputSamples));
+  m_Encoder = ModelType();
+  m_InLayers.clear();
+  size_t previousShape = shark::dataDimension(inputSamples);
   for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
     {
-    layers.push_back(m_NumberOfHiddenNeurons[i]);
+    m_InLayers.push_back( LayerType(previousShape, m_NumberOfHiddenNeurons[i]) );
+    previousShape = m_NumberOfHiddenNeurons[i];
+    m_Encoder.add(&(m_InLayers.back()), true);
     }
-
   for (unsigned int i = std::max(0,static_cast<int>(m_NumberOfHiddenNeurons.Size()-1)) ; i > 0; --i)
     {
-    layers.push_back(m_NumberOfHiddenNeurons[i-1]);
-    }
-
-  layers.push_back(shark::dataDimension(inputSamples));
-  m_Net.setStructure(layers);
-  shark::initRandomNormal(m_Net,0.1);
-
-  // Training of the first Autoencoder (first and last layer of the FF network)
-  if (m_Epsilon > 0)
-    {
-    shark::TrainingProgress<> criterion(5,m_Epsilon);
-
-    OutAutoencoderType net;
-    // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. 
-    if (m_Noise[0] != 0)
-      {
-      TrainOneLayer(criterion, net, 0, inputSamples, ofs);
-      }
-    else
-      {
-      TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
-      }
-    criterion.reset();
+    m_InLayers.push_back( LayerType(previousShape, m_NumberOfHiddenNeurons[i-1]) );
+    previousShape = m_NumberOfHiddenNeurons[i-1];
     }
-  else
-    {
-    shark::MaxIterations<> criterion(m_NumberOfIterations);
+  m_OutLayer = OutLayerType(previousShape, shark::dataDimension(inputSamples));
 
-    OutAutoencoderType net;
-    // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
-    if (m_Noise[0] != 0)
-      {
-      TrainOneLayer(criterion, net, 0, inputSamples, ofs);
-      otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
-      }
-    else
-      {
-      TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
-      }
-    criterion.reset();
-    }
-
-  // Training of the other autoencoders
-  if (m_Epsilon > 0)
+  // Training of the autoencoders pairwise, starting from the first and last layers
+  for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
     {
-    shark::TrainingProgress<> criterion(5,m_Epsilon);
-
-    for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+    if (m_Epsilon > 0)
       {
-      AutoencoderType net;
+      shark::TrainingProgress<> criterion(5,m_Epsilon);
       // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
       if (m_Noise[i] != 0)
         {
-        TrainOneLayer(criterion, net, i, inputSamples, ofs);
+        TrainOneLayer(criterion, i, inputSamples, ofs);
         }
       else
         {
-        TrainOneSparseLayer(criterion, net, i, inputSamples, ofs);
+        TrainOneSparseLayer(criterion, i, inputSamples, ofs);
         }
       criterion.reset();
       }
-    }
-  else
-    {
-    shark::MaxIterations<> criterion(m_NumberOfIterations);
-
-    for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+    else
       {
-      AutoencoderType net;
+      shark::MaxIterations<> criterion(m_NumberOfIterations);
       // Shark doesn't allow to train a layer using a sparsity term AND a noisy input.
       if (m_Noise[i] != 0)
         {
-        TrainOneLayer(criterion, net, i, inputSamples, ofs);
+        TrainOneLayer(criterion, i, inputSamples, ofs);
         otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
         }
       else
         {
-        TrainOneSparseLayer( criterion, net, i, inputSamples, ofs);
+        TrainOneSparseLayer( criterion, i, inputSamples, ofs);
         }
       criterion.reset();
       }
+    // encode the samples with the last encoder trained
+    inputSamples = m_InLayers[i](inputSamples);
     }
   if (m_NumberOfIterationsFineTuning > 0)
     {
@@ -183,31 +142,37 @@ AutoencoderModel<TInputValue,NeuronType>
 }
 
 template <class TInputValue, class NeuronType>
-template <class T, class Autoencoder>
+template <class T>
 void
 AutoencoderModel<TInputValue,NeuronType>
 ::TrainOneLayer(
   shark::AbstractStoppingCriterion<T> & criterion,
-  Autoencoder & net,
   unsigned int layer_index,
   shark::Data<shark::RealVector> &samples,
   std::ostream& File)
 {
+  typedef shark::AbstractModel<shark::RealVector,shark::RealVector> BaseModelType;
+  ModelType net;
+  net.add(&(m_InLayers[layer_index]), true);
+  net.add( (layer_index ?
+    (BaseModelType*) &(m_InLayers[m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index]) :
+    (BaseModelType*) &m_OutLayer) , true);
+
   otbMsgDevMacro(<< "Noise " <<  m_Noise[layer_index]);
   std::size_t inputs = dataDimension(samples);
-  net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
   initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
 
-  shark::ImpulseNoiseModel noise(inputs,m_Noise[layer_index],1.0); //set an input pixel with probability m_Noise to 0
-  shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
+  //~ shark::ImpulseNoiseModel noise(inputs,m_Noise[layer_index],1.0); //set an input pixel with probability m_Noise to 0
+  //~ shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
   shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
   shark::SquaredLoss<shark::RealVector> loss;
-  shark::ErrorFunction error(trainSet, &model, &loss);
+  //~ shark::ErrorFunction error(trainSet, &model, &loss);
+  shark::ErrorFunction<> error(trainSet, &net, &loss);
 
-  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
   error.setRegularizer(m_Regularization[layer_index],&regularizer);
 
-  shark::IRpropPlusFull optimizer;
+  shark::Adam<> optimizer;
   error.init();
   optimizer.init(error);
 
@@ -230,35 +195,37 @@ AutoencoderModel<TInputValue,NeuronType>
     } while( !criterion.stop( optimizer.solution() ) );
 
   net.setParameterVector(optimizer.solution().point);
-  m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());  // Copy the encoder in the FF neural network
-  m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
-  samples = net.encode(samples);
 }
 
 template <class TInputValue, class NeuronType>
-template <class T, class Autoencoder>
+template <class T>
 void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(
   shark::AbstractStoppingCriterion<T> & criterion,
-  Autoencoder & net,
   unsigned int layer_index,
   shark::Data<shark::RealVector> &samples,
   std::ostream& File)
 {
-  //AutoencoderType net;
-  std::size_t inputs = dataDimension(samples);
-  net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
+  typedef shark::AbstractModel<shark::RealVector,shark::RealVector> BaseModelType;
+  ModelType net;
+  net.add(&(m_InLayers[layer_index]), true);
+  net.add( (layer_index ?
+    (BaseModelType*) &(m_InLayers[m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index]) :
+    (BaseModelType*) &m_OutLayer) , true);
 
+  std::size_t inputs = dataDimension(samples);
   shark::initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
 
   // Idea : set the initials value for the output weights higher than the input weights
 
   shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
   shark::SquaredLoss<shark::RealVector> loss;
-  shark::SparseAutoencoderError error(trainSet,&net, &loss, m_Rho[layer_index], m_Beta[layer_index]);
-
-  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  //~ shark::SparseAutoencoderError error(trainSet,&net, &loss, m_Rho[layer_index], m_Beta[layer_index]);
+  // SparseAutoencoderError doesn't exist anymore, for now use a plain ErrorFunction
+  shark::ErrorFunction<> error(trainSet, &net, &loss);
+  
+  shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
   error.setRegularizer(m_Regularization[layer_index],&regularizer);
-  shark::IRpropPlusFull optimizer;
+  shark::Adam<> optimizer;
   error.init();
   optimizer.init(error);
 
@@ -279,9 +246,6 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(
     File << "end layer" << std::endl;
     }
   net.setParameterVector(optimizer.solution().point);
-  m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());  // Copy the encoder in the FF neural network
-  m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
-  samples = net.encode(samples);
 }
 
 template <class TInputValue, class NeuronType>
@@ -293,15 +257,23 @@ AutoencoderModel<TInputValue,NeuronType>
   shark::Data<shark::RealVector> &samples,
   std::ostream& File)
 {
+  // create full network
+  ModelType net;
+  for (auto &layer : m_InLayers)
+    {
+    net.add(&layer, true);
+    }
+  net.add(&m_OutLayer, true);
+  
   //labels identical to inputs
   shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);
   shark::SquaredLoss<shark::RealVector> loss;
 
-  shark::ErrorFunction error(trainSet, &m_Net, &loss);
-  shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+  shark::ErrorFunction<> error(trainSet, &net, &loss);
+  shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
   error.setRegularizer(m_Regularization[0],&regularizer);
 
-  shark::IRpropPlusFull optimizer;
+  shark::Adam<> optimizer;
   error.init();
   optimizer.init(error);
   otbMsgDevMacro(<<"Error before training : " << optimizer.solution().value);
@@ -326,7 +298,6 @@ AutoencoderModel<TInputValue,NeuronType>
   try
     {
     this->Load(filename);
-    m_Net.name();
     }
   catch(...)
     {
@@ -350,22 +321,15 @@ AutoencoderModel<TInputValue,NeuronType>
 {
   otbMsgDevMacro(<< "saving model ...");
   std::ofstream ofs(filename);
-  ofs << m_Net.name() << std::endl; // the first line of the model file contains a key
+  ofs << "Autoencoder" << std::endl; // the first line of the model file contains a key
+  ofs << (m_InLayers.size() + 1) << std::endl; // second line is the total number of layers (encoder layers + output layer)
   shark::TextOutArchive oa(ofs);
-  oa << m_Net;
-  ofs.close();
-
-  if (this->m_WriteWeights == true)     // output the map vectors in a txt file
+  for (const auto &layer : m_InLayers)
     {
-    std::ofstream otxt(filename+".txt");
-    for (unsigned int i = 0 ; i < m_Net.layerMatrices().size(); ++i)
-      {
-      otxt << "layer " << i << std::endl;
-      otxt << m_Net.layerMatrix(i) << std::endl;
-      otxt << m_Net.bias(i) << std::endl;
-      otxt << std::endl;
-      }
+    oa << layer;
     }
+  oa << m_OutLayer;
+  ofs.close();
 }
 
 template <class TInputValue, class NeuronType>
@@ -373,23 +337,39 @@ void
 AutoencoderModel<TInputValue,NeuronType>
 ::Load(const std::string & filename, const std::string & /*name*/)
 {
-  NetworkType net;
   std::ifstream ifs(filename);
-  char autoencoder[256];
-  ifs.getline(autoencoder,256);
-  std::string autoencoderstr(autoencoder);
-
-  if (autoencoderstr != net.name()){
+  char buffer[256];
+  // check first line
+  ifs.getline(buffer,256);
+  std::string bufferStr(buffer);
+  if (bufferStr != "Autoencoder"){
     itkExceptionMacro(<< "Error opening " << filename.c_str() );
     }
+  // check second line
+  ifs.getline(buffer,256);
+  int nbLevels = boost::lexical_cast<int>(buffer);
+  if (nbLevels < 2 || nbLevels%2 == 1)
+    {
+    itkExceptionMacro(<< "Unexpected number of levels : "<<buffer );
+    }
+  m_InLayers.clear();
+  m_Encoder = ModelType();
   shark::TextInArchive ia(ifs);
-  ia >> m_Net;
+  for (int i=0 ; (i+1) < nbLevels ; i++)
+    {
+    LayerType layer;
+    ia >> layer;
+    m_InLayers.push_back(layer);
+    }
+  ia >> m_OutLayer;
   ifs.close();
 
-  // This gives us the dimension if we keep the encoder and decoder
-  size_t feature_layer_index = m_Net.layerMatrices().size()/2;
-  // number of neurons in the feature layer (second dimension of the first decoder weight matrix)
-  this->SetDimension(m_Net.layerMatrix(feature_layer_index).size2());
+  for (int i=0 ; i < nbLevels/2 ; i++)
+    {
+    m_Encoder.add(&(m_InLayers[i]) ,true);
+    }
+
+  this->SetDimension( m_Encoder.outputShape()[0] );
 }
 
 template <class TInputValue, class NeuronType>
@@ -409,7 +389,7 @@ AutoencoderModel<TInputValue,NeuronType>
   shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
 
   // features layer for a network containing the encoder and decoder part
-  data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
+  data = m_Encoder(data);
   TargetSampleType target;
   target.SetSize(this->m_Dimension);
 
@@ -435,7 +415,7 @@ AutoencoderModel<TInputValue,NeuronType>
   shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
   TargetSampleType target;
   // features layer for a network containing the encoder and decoder part
-  data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
+  data = m_Encoder(data);
 
   unsigned int id = startIndex;
   target.SetSize(this->m_Dimension);
diff --git a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
index 9f39326a21bc5f1980a49d80ecdaea55b42a450a..a387852fecc386d9c5f2a6c27c7bf39cd7a3649d 100644
--- a/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
+++ b/Modules/Learning/DimensionalityReductionLearning/include/otbPCAModel.txx
@@ -137,11 +137,11 @@ PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*
   ifs.close();
   if (this->m_Dimension ==0)
   {
-    this->m_Dimension = m_Encoder.outputSize();
+    this->m_Dimension = m_Encoder.outputShape()[0];
   }
 
   auto eigenvectors = m_Encoder.matrix();
-  eigenvectors.resize(this->m_Dimension,m_Encoder.inputSize());
+  eigenvectors.resize(this->m_Dimension,m_Encoder.inputShape()[0]);
 
   m_Encoder.setStructure(eigenvectors, m_Encoder.offset() );
 }
diff --git a/Modules/Learning/LearningBase/otb-module.cmake b/Modules/Learning/LearningBase/otb-module.cmake
index afa2a339a1813cf16e5f6ea3700f079a36180dcd..c0af985032de6d4a2acd11988be1b9a177cf8219 100644
--- a/Modules/Learning/LearningBase/otb-module.cmake
+++ b/Modules/Learning/LearningBase/otb-module.cmake
@@ -28,7 +28,11 @@ otb_module(OTBLearningBase
     OTBImageBase
     OTBITK
 
-  TEST_DEPENDS
+    OPTIONAL_DEPENDS
+    OTBShark
+
+    TEST_DEPENDS
+    OTBBoost
     OTBTestKernel
     OTBImageIO
 
diff --git a/Modules/Learning/LearningBase/test/CMakeLists.txt b/Modules/Learning/LearningBase/test/CMakeLists.txt
index d1d16c3e65801e606c6e6903538b65264a4483a6..48e28cc5cad320ffa41eee0659ff6979d0bf4457 100644
--- a/Modules/Learning/LearningBase/test/CMakeLists.txt
+++ b/Modules/Learning/LearningBase/test/CMakeLists.txt
@@ -32,6 +32,10 @@ otbKMeansImageClassificationFilterNew.cxx
 otbMachineLearningModelTemplates.cxx
 )
 
+if(OTB_USE_SHARK)
+  set(OTBLearningBaseTests ${OTBLearningBaseTests} otbSharkUtilsTests.cxx)
+endif()
+
 add_executable(otbLearningBaseTestDriver ${OTBLearningBaseTests})
 target_link_libraries(otbLearningBaseTestDriver ${OTBLearningBase-Test_LIBRARIES})
 otb_module_target_label(otbLearningBaseTestDriver)
@@ -68,3 +72,7 @@ otb_add_test(NAME leTuDecisionTreeNew COMMAND otbLearningBaseTestDriver
 otb_add_test(NAME leTuKMeansImageClassificationFilterNew COMMAND otbLearningBaseTestDriver
   otbKMeansImageClassificationFilterNew)
 
+if(OTB_USE_SHARK)
+  otb_add_test(NAME leTuSharkNormalizeLabels COMMAND otbLearningBaseTestDriver
+    otbSharkNormalizeLabels)
+endif()
diff --git a/Modules/Learning/LearningBase/test/otbLearningBaseTestDriver.cxx b/Modules/Learning/LearningBase/test/otbLearningBaseTestDriver.cxx
index 5b38bf300dd4520c18e198b6e6643848cbdc937c..dc2d36b7943129ec6519ebbc4f194d1dd6078800 100644
--- a/Modules/Learning/LearningBase/test/otbLearningBaseTestDriver.cxx
+++ b/Modules/Learning/LearningBase/test/otbLearningBaseTestDriver.cxx
@@ -29,4 +29,7 @@ void RegisterTests()
   REGISTER_TEST(otbSEMClassifierNew);
   REGISTER_TEST(otbDecisionTreeNew);
   REGISTER_TEST(otbKMeansImageClassificationFilterNew);
+#ifdef OTB_USE_SHARK
+  REGISTER_TEST(otbSharkNormalizeLabels);
+#endif
 }
diff --git a/Modules/Learning/LearningBase/test/otbSharkUtilsTests.cxx b/Modules/Learning/LearningBase/test/otbSharkUtilsTests.cxx
new file mode 100644
index 0000000000000000000000000000000000000000..bc3783cb728b0f5ad0f6b2d43620b18ba7939e30
--- /dev/null
+++ b/Modules/Learning/LearningBase/test/otbSharkUtilsTests.cxx
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+ *
+ * This file is part of Orfeo Toolbox
+ *
+ *     https://www.orfeo-toolbox.org/
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "itkMacro.h"
+#include "otbSharkUtils.h"
+
+
+int otbSharkNormalizeLabels(int itkNotUsed(argc), char* itkNotUsed(argv) [])
+{
+  std::vector<unsigned int> inLabels = {2, 2, 3, 20, 1};
+  std::vector<unsigned int> expectedDictionary = {2, 3, 20, 1};
+  std::vector<unsigned int> expectedLabels = {0, 0, 1, 2, 3};
+
+  auto newLabels = inLabels;
+  std::vector<unsigned int> labelDict;
+  otb::Shark::NormalizeLabelsAndGetDictionary(newLabels, labelDict);
+
+  if(newLabels != expectedLabels)
+    {
+    std::cout << "Wrong new labels\n";
+    for(size_t i = 0; i<newLabels.size(); ++i)
+      std::cout << "Got " << newLabels[i] << " expected " << expectedLabels[i] << '\n';
+
+    return EXIT_FAILURE;
+    }
+
+  if(labelDict != expectedDictionary)
+    {
+    std::cout << "Wrong dictionary\n";
+    for(size_t i = 0; i<labelDict.size(); ++i)
+      std::cout << "Got " << labelDict[i] << " expected " << expectedDictionary[i] << '\n';
+
+    return EXIT_FAILURE;
+    }
+
+  return EXIT_SUCCESS;
+}
diff --git a/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.h b/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.h
index 7dd41d6eed22208d512ecf0e5d4eb5116f2bf5c0..9e4a985901c2f7007529c279ce156e371ebbcbd2 100644
--- a/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.h
+++ b/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.h
@@ -34,6 +34,7 @@
 #pragma GCC diagnostic ignored "-Wcast-align"
 #pragma GCC diagnostic ignored "-Wunknown-pragmas"
 #endif
+#include <shark/Models/Classifier.h>
 #include "otb_shark.h"
 #include "shark/Algorithms/Trainers/RFTrainer.h"
 #if defined(__GNUC__) || defined(__clang__)
@@ -134,6 +135,10 @@ public:
   /** If true, margin confidence value will be computed */
   itkSetMacro(ComputeMargin, bool);
 
+  /** If true, class labels will be normalized to [0 ... nbClasses-1] */
+  itkGetMacro(NormalizeClassLabels, bool);
+  itkSetMacro(NormalizeClassLabels, bool);
+
 protected:
   /** Constructor */
   SharkRandomForestsMachineLearningModel();
@@ -154,8 +159,10 @@ private:
   SharkRandomForestsMachineLearningModel(const Self &); //purposely not implemented
   void operator =(const Self&); //purposely not implemented
 
-  shark::RFClassifier m_RFModel;
-  shark::RFTrainer m_RFTrainer;
+  shark::RFClassifier<unsigned int> m_RFModel;
+  shark::RFTrainer<unsigned int> m_RFTrainer;
+  std::vector<unsigned int> m_ClassDictionary;
+  bool m_NormalizeClassLabels;
 
   unsigned int m_NumberOfTrees;
   unsigned int m_MTry;
diff --git a/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.txx b/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.txx
index 207f1abdd77e4b5cfffd9bc5d104c4b40232f853..72c816069bebddc048a0f8af48f24579a55fa38b 100644
--- a/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.txx
+++ b/Modules/Learning/Supervised/include/otbSharkRandomForestsMachineLearningModel.txx
@@ -32,7 +32,6 @@
 #pragma GCC diagnostic ignored "-Woverloaded-virtual"
 #pragma GCC diagnostic ignored "-Wignored-qualifiers"
 #endif
-#include <shark/Models/Converter.h>
 #if defined(__GNUC__) || defined(__clang__)
 #pragma GCC diagnostic pop
 #endif
@@ -52,6 +51,7 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
   this->m_ConfidenceIndex = true;
   this->m_IsRegressionSupported = false;
   this->m_IsDoPredictBatchMultiThreaded = true;
+  this->m_NormalizeClassLabels = true;
 }
 
 
@@ -76,13 +76,17 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
 
   Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
   Shark::ListSampleToSharkVector(this->GetTargetListSample(), class_labels);
+  if(m_NormalizeClassLabels)
+    {
+    Shark::NormalizeLabelsAndGetDictionary(class_labels, m_ClassDictionary);
+    }
   shark::ClassificationDataset TrainSamples = shark::createLabeledDataFromRange(features,class_labels);
 
   //Set parameters
   m_RFTrainer.setMTry(m_MTry);
   m_RFTrainer.setNTrees(m_NumberOfTrees);
   m_RFTrainer.setNodeSize(m_NodeSize);
-  m_RFTrainer.setOOBratio(m_OobRatio);
+  //  m_RFTrainer.setOOBratio(m_OobRatio);
   m_RFTrainer.train(m_RFModel, TrainSamples);
 
 }
@@ -125,15 +129,20 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
     }
   if (quality != ITK_NULLPTR)
     {
-    shark::RealVector probas = m_RFModel(samples);
+    shark::RealVector probas = m_RFModel.decisionFunction()(samples);
     (*quality) = ComputeConfidence(probas, m_ComputeMargin);
     }
-  shark::ArgMaxConverter<shark::RFClassifier> amc;
-  amc.decisionFunction() = m_RFModel;
-  unsigned int res;
-  amc.eval(samples, res);
+  unsigned int res{0};
+  m_RFModel.eval(samples, res);
   TargetSampleType target;
-  target[0] = static_cast<TOutputValue>(res);
+  if(m_NormalizeClassLabels)
+    {
+    target[0] = m_ClassDictionary[static_cast<TOutputValue>(res)];
+    }
+  else
+    {
+    target[0] = static_cast<TOutputValue>(res);
+    }
   return target;
 }
 
@@ -157,13 +166,13 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
   Shark::ListSampleRangeToSharkVector(input, features,startIndex,size);
   shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange(features);
 
-  #ifdef _OPENMP
+#ifdef _OPENMP
   omp_set_num_threads(itk::MultiThreader::GetGlobalDefaultNumberOfThreads());
-  #endif
+#endif
   
   if(quality != ITK_NULLPTR)
     {
-    shark::Data<shark::RealVector> probas = m_RFModel(inputSamples);
+    shark::Data<shark::RealVector> probas = m_RFModel.decisionFunction()(inputSamples);
     unsigned int id = startIndex;
     for(shark::RealVector && p : probas.elements())
       {
@@ -175,14 +184,19 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
       }
     }
     
-  shark::ArgMaxConverter<shark::RFClassifier> amc;
-  amc.decisionFunction() = m_RFModel;
-  auto prediction = amc(inputSamples);
+  auto prediction = m_RFModel(inputSamples);
   unsigned int id = startIndex;
   for(const auto& p : prediction.elements())
     {
     TargetSampleType target;
-    target[0] = static_cast<TOutputValue>(p);
+    if(m_NormalizeClassLabels)
+      {
+      target[0] = m_ClassDictionary[static_cast<TOutputValue>(p)];
+      }
+    else
+      {
+      target[0] = static_cast<TOutputValue>(p);
+      }
     targets->SetMeasurementVector(id,target);
     ++id;
     }
@@ -199,7 +213,18 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
     itkExceptionMacro(<< "Error opening " << filename.c_str() );
     }
   // Add comment with model file name
-  ofs << "#" << m_RFModel.name() << std::endl;
+  ofs << "#" << m_RFModel.name();
+  if(m_NormalizeClassLabels) ofs  << " with_dictionary";
+  ofs << std::endl;
+  if(m_NormalizeClassLabels)
+    {
+    ofs << m_ClassDictionary.size() << " ";
+    for(const auto& l : m_ClassDictionary)
+      {
+      ofs << l << " ";
+      }
+    ofs << std::endl;
+    }
   shark::TextOutArchive oa(ofs);
   m_RFModel.save(oa,0);
 }
@@ -219,6 +244,10 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
       {
       if( line.find( m_RFModel.name() ) == std::string::npos )
         itkExceptionMacro( "The model file : " + filename + " cannot be read." );
+      if( line.find( "with_dictionary" ) == std::string::npos )
+        {
+        m_NormalizeClassLabels=false;
+        }
       }
     else
       {
@@ -226,6 +255,18 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
       ifs.clear();
       ifs.seekg( 0, std::ios::beg );
       }
+    if(m_NormalizeClassLabels)
+      {
+      size_t nbLabels{0};
+      ifs >> nbLabels;
+      m_ClassDictionary.resize(nbLabels);
+      for(size_t i=0; i<nbLabels; ++i)
+        {
+        unsigned int label;
+        ifs >> label;
+        m_ClassDictionary[i]=label;
+        }
+      }
     shark::TextInArchive ia( ifs );
     m_RFModel.load( ia, 0 );
     }
diff --git a/Modules/Learning/Unsupervised/include/otbSharkKMeansMachineLearningModel.txx b/Modules/Learning/Unsupervised/include/otbSharkKMeansMachineLearningModel.txx
index 9dd43948a719c9305dace0a6366ebfd40e4b3e24..1b08d538c943001279d9401f314d51e21e8dbf88 100644
--- a/Modules/Learning/Unsupervised/include/otbSharkKMeansMachineLearningModel.txx
+++ b/Modules/Learning/Unsupervised/include/otbSharkKMeansMachineLearningModel.txx
@@ -55,6 +55,7 @@ SharkKMeansMachineLearningModel<TInputValue, TOutputValue>
         m_Normalized( false ), m_K(2), m_MaximumNumberOfIterations( 10 )
 {
   // Default set HardClusteringModel
+  this->m_ConfidenceIndex = true;
   m_ClusteringModel = boost::make_shared<ClusteringModelType>( &m_Centroids );
 }
 
@@ -174,7 +175,7 @@ SharkKMeansMachineLearningModel<TInputValue, TOutputValue>
   // Change quality measurement only if SoftClustering or other clustering method is used.
   if( quality != ITK_NULLPTR )
     {
-    for( unsigned int qid = startIndex; qid < size; ++qid )
+    for( unsigned int qid = startIndex; qid < startIndex+size; ++qid )
       {
       quality->SetMeasurementVector( qid, static_cast<ConfidenceValueType>(1.) );
       }
diff --git a/Modules/ThirdParty/Boost/otb-module-init.cmake b/Modules/ThirdParty/Boost/otb-module-init.cmake
index a5f58041fb0bf5001d123ff2e0f772a26cff6d34..0a07bdfdbc7a9d33a03464925c95667a036bea59 100644
--- a/Modules/ThirdParty/Boost/otb-module-init.cmake
+++ b/Modules/ThirdParty/Boost/otb-module-init.cmake
@@ -31,3 +31,8 @@ if (BUILD_TESTING)
     message(STATUS "Found Boost components: unit_test_framework")
   endif()
 endif() #BUILD_TESTING
+
+if(WIN32)
+  # disable autolinking in boost
+	add_definitions( -DBOOST_ALL_NO_LIB )
+endif()
diff --git a/Modules/ThirdParty/Shark/include/otbSharkUtils.h b/Modules/ThirdParty/Shark/include/otbSharkUtils.h
index de3adf77401d0f131d2bd7d447627829b3df64ff..04c57b6d4e7f5a022b0c4fafa86ac41b134f690c 100644
--- a/Modules/ThirdParty/Shark/include/otbSharkUtils.h
+++ b/Modules/ThirdParty/Shark/include/otbSharkUtils.h
@@ -23,6 +23,7 @@
 
 #include <stdexcept>
 #include <string>
+#include <unordered_map>
 
 #if defined(__GNUC__) || defined(__clang__)
 #pragma GCC diagnostic push
@@ -127,6 +128,27 @@ template <class T> void ListSampleToSharkVector(const T * listSample, std::vecto
   assert(listSample != nullptr);
   ListSampleRangeToSharkVector(listSample,output,0, static_cast<unsigned int>(listSample->Size()));
 }
+
+/** Shark assumes that labels are 0 ... (nbClasses-1). This function normalizes the labels in the input vector in place, and fills a dictionary vector of size nbClasses that translates a normalized label back to the original one: oldLabel = dictionary[newLabel].
+*/
+template <typename T> void NormalizeLabelsAndGetDictionary(std::vector<T>& labels, 
+                                                           std::vector<T>& dictionary)
+{
+  std::unordered_map<T, T> dictMap;
+  T labelCount{0};
+  for(const auto& l : labels)
+    {
+    if(dictMap.find(l)==dictMap.end())
+      dictMap.insert({l, labelCount++});
+    }
+  dictionary.resize(labelCount);
+  for(auto& l : labels)
+    {
+    auto newLabel = dictMap[l];
+    dictionary[newLabel] = l;
+    l = newLabel;
+    }
+}
   
 }
 }
diff --git a/SuperBuild/CMake/External_shark.cmake b/SuperBuild/CMake/External_shark.cmake
index ce8486db084935352b4266fc384f40be3604a29c..33934d4bb943dce00faceb9d6910534197a2342e 100644
--- a/SuperBuild/CMake/External_shark.cmake
+++ b/SuperBuild/CMake/External_shark.cmake
@@ -30,8 +30,8 @@ ADD_SUPERBUILD_CMAKE_VAR(SHARK BOOST_LIBRARYDIR)
 
 ExternalProject_Add(SHARK
   PREFIX SHARK
-  URL "https://github.com/Shark-ML/Shark/archive/349f29bd71c370e0f88f7fc9aa66fa5c4768fcb0.zip"
-  URL_MD5 d6e4310f943e8dda4a0151612b5c62ce
+  URL "https://github.com/Shark-ML/Shark/archive/2fd55e2b83f0666d05b403b291712668f4b76a13.zip"
+  URL_MD5 863bb5f0d94b01be5292867beb05a0bb
   SOURCE_DIR ${SHARK_SB_SRC}
   BINARY_DIR ${SHARK_SB_BUILD_DIR}
   INSTALL_DIR ${SB_INSTALL_PREFIX}
@@ -45,6 +45,7 @@ ExternalProject_Add(SHARK
   -DENABLE_HDF5:BOOL=OFF
   -DENABLE_CBLAS:BOOL=OFF
   -DENABLE_OPENMP:BOOL=${OTB_USE_OPENMP}
+  -DSHARK_INSTALL_LIB_DIR:STRING=lib/
   ${SHARK_SB_CONFIG}
   CMAKE_COMMAND ${SB_CMAKE_COMMAND}
   LOG_DOWNLOAD 1
diff --git a/SuperBuild/patches/SHARK/shark-2-ext-num-literals-all.diff b/SuperBuild/patches/SHARK/shark-2-ext-num-literals-all.diff
new file mode 100644
index 0000000000000000000000000000000000000000..0b964c1b9ada7aa4409f0f032285a70723caacfe
--- /dev/null
+++ b/SuperBuild/patches/SHARK/shark-2-ext-num-literals-all.diff
@@ -0,0 +1,13 @@
+diff -burN Shark.orig/CMakeLists.txt Shark/CMakeLists.txt
+--- Shark.orig/CMakeLists.txt	2018-02-05 18:04:58.012612932 +0100
++++ Shark/CMakeLists.txt	2018-02-05 18:20:50.032233165 +0100
+@@ -415,6 +415,9 @@
+ #####################################################################
+ #                       General Path settings
+ #####################################################################
++if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
++  add_definitions(-fext-numeric-literals)
++endif()
+ include_directories( ${shark_SOURCE_DIR}/include )
+ include_directories( ${shark_BINARY_DIR}/include )
+ add_subdirectory( include )
diff --git a/SuperBuild/patches/SHARK/shark-2-find-boost-all.diff b/SuperBuild/patches/SHARK/shark-2-find-boost-all.diff
deleted file mode 100644
index a97c1ac4afd1f56118fdba14cf7b993755bb5c00..0000000000000000000000000000000000000000
--- a/SuperBuild/patches/SHARK/shark-2-find-boost-all.diff
+++ /dev/null
@@ -1,16 +0,0 @@
-diff -burN Shark-349f29bd71c370e0f88f7fc9aa66fa5c4768fcb0.orig/CMakeLists.txt Shark-349f29bd71c370e0f88f7fc9aa66fa5c4768fcb0/CMakeLists.txt
---- Shark-349f29bd71c370e0f88f7fc9aa66fa5c4768fcb0.orig/CMakeLists.txt	2017-08-22 11:31:50.472052695 +0200
-+++ Shark-349f29bd71c370e0f88f7fc9aa66fa5c4768fcb0/CMakeLists.txt	2017-08-22 11:32:36.448358789 +0200
-@@ -141,10 +141,8 @@
- 
- find_package( 
- 	Boost 1.48.0 REQUIRED COMPONENTS
--	system date_time filesystem
--	program_options serialization thread
--	unit_test_framework
--)
-+	serialization
-+	)
- 
- if(NOT Boost_FOUND)
- 	message(FATAL_ERROR "Please make sure Boost 1.48.0 is installed on your system")