diff --git a/app/cbDimensionalityReduction.cxx b/app/cbDimensionalityReduction.cxx
index bc9a5754fec14e92c50e394ccc781d8b211bcf5b..106f609d48078a1e5cee94ed5276571dd589e61c 100644
--- a/app/cbDimensionalityReduction.cxx
+++ b/app/cbDimensionalityReduction.cxx
@@ -222,7 +222,7 @@ private:
         }
         
       // Rescale vector image
-      m_Rescaler->SetScale(stddevMeasurementVector*3);
+      m_Rescaler->SetScale(stddevMeasurementVector);
       m_Rescaler->SetShift(meanMeasurementVector);
       m_Rescaler->SetInput(inImage);
 
diff --git a/app/cbDimensionalityReductionTrainer.cxx b/app/cbDimensionalityReductionTrainer.cxx
index 55232ef871b527a00e5e3664737eb2bb4c9bdb1f..4cb042427b0288b1da73b4149afbab4b152fecce 100644
--- a/app/cbDimensionalityReductionTrainer.cxx
+++ b/app/cbDimensionalityReductionTrainer.cxx
@@ -124,7 +124,7 @@ private:
 		ShiftScaleFilterType::Pointer trainingShiftScaleFilter = ShiftScaleFilterType::New();
 		trainingShiftScaleFilter->SetInput(input);
 		trainingShiftScaleFilter->SetShifts(meanMeasurementVector);
-		trainingShiftScaleFilter->SetScales(stddevMeasurementVector*3);
+		trainingShiftScaleFilter->SetScales(stddevMeasurementVector);
 		trainingShiftScaleFilter->Update();
 
 		ListSampleType::Pointer trainingListSample= trainingShiftScaleFilter->GetOutput();
diff --git a/app/cbDimensionalityReductionVector.cxx b/app/cbDimensionalityReductionVector.cxx
index cf2caed54850b84703a3a5d34670438f9c5c3a5f..12e1307ad112a8e3a71252a352f029fa0dfd72ca 100644
--- a/app/cbDimensionalityReductionVector.cxx
+++ b/app/cbDimensionalityReductionVector.cxx
@@ -223,7 +223,7 @@ class CbDimensionalityReductionVector : public Application
 			ShiftScaleFilterType::Pointer trainingShiftScaleFilter = ShiftScaleFilterType::New();
 			trainingShiftScaleFilter->SetInput(input);
 			trainingShiftScaleFilter->SetShifts(meanMeasurementVector);
-			trainingShiftScaleFilter->SetScales(stddevMeasurementVector*3);
+			trainingShiftScaleFilter->SetScales(stddevMeasurementVector);
 			trainingShiftScaleFilter->Update();
 			otbAppLogINFO("mean used: " << meanMeasurementVector);
 			otbAppLogINFO("standard deviation used: " << stddevMeasurementVector);
diff --git a/include/AutoencoderModel.h b/include/AutoencoderModel.h
index 23cf4a2c176a5770ba06619b74e3c84dd267afe0..5dbca52e2724bc4370279728c49a0b702a5a1d0e 100644
--- a/include/AutoencoderModel.h
+++ b/include/AutoencoderModel.h
@@ -83,12 +83,15 @@ public:
 
 	void Train() ITK_OVERRIDE;
 	
-	template <class T>
-	void TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion,unsigned int, unsigned int,double, double, shark::Data<shark::RealVector> &, std::ostream&);
+	template <class T, class Autoencoder>
+	void TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, Autoencoder &, unsigned int, unsigned int, double, double, shark::Data<shark::RealVector> &, std::ostream&);
 	
 	template <class T, class Autoencoder>
 	void TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion,Autoencoder &, unsigned int, unsigned int,double, double,double, shark::Data<shark::RealVector> &, std::ostream&);
 	
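+	// Trains the complete stacked network (all encoder/decoder layers) on the given samples.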
+	template <class T>
+	void TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion, double, double, double, shark::Data<shark::RealVector> &, std::ostream&);
+
 protected:
 	AutoencoderModel();	
 	~AutoencoderModel() ITK_OVERRIDE;
diff --git a/include/AutoencoderModel.txx b/include/AutoencoderModel.txx
index 20dde00f4f23158ee5db935350b51d42719bcc3e..668bb7d6adfcc276b11727d2761dc984cd1d1d39 100644
--- a/include/AutoencoderModel.txx
+++ b/include/AutoencoderModel.txx
@@ -40,6 +40,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 	std::vector<shark::RealVector> features;
 	Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
 	shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
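+	// Keep an unencoded copy of the samples: the layer-wise training below re-encodes inputSamples in place.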
+	shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
 	
 	std::ofstream ofs;
 	if (this->m_WriteLearningCurve =true) 
@@ -54,10 +55,8 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 	for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
 	{
 		layers.push_back(m_NumberOfHiddenNeurons[i]);
-		std::cout << m_NumberOfHiddenNeurons.Size() << std::endl;
 	}
-	// another loop for the decoder should be added, for now i just add the output layer size
-	std::cout << "i?" << static_cast<int>(m_NumberOfHiddenNeurons.Size()-1) << std::endl;
+
 	for (unsigned int i = std::max(0,static_cast<int>(m_NumberOfHiddenNeurons.Size()-1)) ; i > 0; --i)
 	{
 		std::cout << i << std::endl;
@@ -73,13 +72,13 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 	if (m_Epsilon > 0){
 		shark::TrainingProgress<> criterion(5,m_Epsilon);
 		
+		OutAutoencoderType net; // first-layer autoencoder, trained by whichever branch runs below
 		if (m_Noise[0] != 0)   // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
 		{
-			TrainOneLayer(criterion,0 , m_NumberOfHiddenNeurons[0],m_Noise[0],m_Regularization[0], inputSamples,ofs);
+			TrainOneLayer(criterion, net, 0, m_NumberOfHiddenNeurons[0], m_Noise[0], m_Regularization[0], inputSamples, ofs);
 		}
 		else
 		{
-			OutAutoencoderType net;
 			TrainOneSparseLayer( criterion, net , 0 , m_NumberOfHiddenNeurons[0],m_Rho[0],m_Beta[0],m_Regularization[0],inputSamples, ofs);
 		}
 		criterion.reset();
@@ -88,13 +87,13 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 	else {
 		shark::MaxIterations<> criterion(m_NumberOfIterations);
 
+		OutAutoencoderType net;
 		if (m_Noise[0] != 0)   // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
 		{
-			TrainOneLayer(criterion,0, m_NumberOfHiddenNeurons[0],m_Noise[0],m_Regularization[0], inputSamples, ofs);
+			TrainOneLayer(criterion, net, 0, m_NumberOfHiddenNeurons[0], m_Noise[0], m_Regularization[0], inputSamples, ofs);
 		}
 		else
 		{
-			OutAutoencoderType net;
 			TrainOneSparseLayer(criterion, net, 0, m_NumberOfHiddenNeurons[0],m_Rho[0],m_Beta[0],m_Regularization[0], inputSamples, ofs);
 		}
 		criterion.reset();
@@ -107,13 +106,13 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 		
 		for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
 		{
+			AutoencoderType net;
 			if (m_Noise[i] != 0)   // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
 			{
-				TrainOneLayer(criterion,i , m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples,ofs);
+				TrainOneLayer(criterion, net, i, m_NumberOfHiddenNeurons[i], m_Noise[i], m_Regularization[i], inputSamples, ofs);
 			}
 			else
 			{
-				AutoencoderType net;
 				TrainOneSparseLayer( criterion, net , i , m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i],inputSamples, ofs);
 			}
 			criterion.reset();
@@ -126,26 +125,29 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 		
 		for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
 		{
+			AutoencoderType net;
 			if (m_Noise[i] != 0)   // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
 			{
-				TrainOneLayer(criterion,i, m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples, ofs);
+				TrainOneLayer(criterion, net, i, m_NumberOfHiddenNeurons[i], m_Noise[i], m_Regularization[i], inputSamples, ofs);
 			}
 			else
 			{
-				AutoencoderType net;
 				TrainOneSparseLayer(criterion, net, i, m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i], inputSamples, ofs);
 			}
 			criterion.reset();
 		}
 		
 	}
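+	// Fine-tuning pass: train the assembled encoder/decoder network end to end on the original samples.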
+	shark::MaxIterations<> criterion(m_NumberOfIterations);
+	TrainNetwork(criterion, m_Rho[0], m_Beta[0], m_Regularization[0], inputSamples_copy, ofs);
+
 }
 
 template <class TInputValue, class NeuronType>
-template <class T>
-void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion,unsigned int layer_index, unsigned int nbneuron,double noise_strength,double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
+template <class T, class Autoencoder>
+void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, Autoencoder & net, unsigned int layer_index, unsigned int nbneuron, double noise_strength, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
-	AutoencoderType net;
 
 	std::size_t inputs = dataDimension(samples);
 	net.setStructure(inputs, nbneuron);
@@ -160,7 +162,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 	shark::TwoNormRegularizer regularizer(error.numberOfVariables());
 	error.setRegularizer(regularization,&regularizer);
 
-	shark::IRpropPlusFull optimizer;
+	shark::RpropPlus optimizer;
 	error.init();
 	optimizer.init(error);
 	
@@ -178,13 +180,14 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 		{	
 		File << optimizer.solution().value << std::endl;
 		}
+		std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
+	
 	} while( !criterion.stop( optimizer.solution() ) );
-	std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
 	
 	net.setParameterVector(optimizer.solution().point);
-	// m_net.push_back(net);
-	m_net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());
-	m_net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias());
+	m_net.setLayer(layer_index, net.encoderMatrix(), net.hiddenBias()); // copy the encoder into the feed-forward network
+	m_net.setLayer(m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index, net.decoderMatrix(), net.outputBias()); // copy the decoder into the feed-forward network
 	samples = net.encode(samples);
 }
 
@@ -206,7 +209,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 	shark::TwoNormRegularizer regularizer(error.numberOfVariables());
 	error.setRegularizer(regularization,&regularizer);
 
-	shark::IRpropPlusFull optimizer;
+	shark::RpropPlus optimizer;
 	error.init();
 	optimizer.init(error);
 	std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
@@ -214,6 +217,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 	do{
 		i++;
 		optimizer.step(error);
+		std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
 		if (this->m_WriteLearningCurve =true) 
 		{	
 		File << optimizer.solution().value << std::endl;
@@ -229,11 +233,41 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 	m_net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias());  // Copy the encoder in the FF neural network
 	m_net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
 	samples = net.encode(samples);
-	std::cout << "numero zero " << m_net.layerMatrix(layer_index) << "i " << layer_index <<std::endl <<std::endl;
-	std::cout << "numero uno " << m_net.layerMatrix(m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index) << "i " <<  m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index << std::endl <<std::endl;
 	
 }
 
+
+template <class TInputValue, class NeuronType>
+template <class T>
+void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion, double rho, double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
+{
+	// rho and beta are accepted for symmetry with the layer-wise trainers but are not used in this pass.
+	shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples, samples); // labels identical to inputs
+	shark::SquaredLoss<shark::RealVector> loss;
+
+	shark::ErrorFunction error(trainSet, &m_net, &loss);
+	shark::TwoNormRegularizer regularizer(error.numberOfVariables());
+	error.setRegularizer(regularization, &regularizer);
+
+	shark::RpropPlus optimizer;
+	error.init();
+	optimizer.init(error);
+	std::cout << "error before training: " << optimizer.solution().value << std::endl;
+	unsigned int i = 0;
+	do {
+		i++;
+		optimizer.step(error);
+		std::cout << "error after " << i << " iterations: " << optimizer.solution().value << std::endl;
+		if (this->m_WriteLearningCurve)
+		{
+			File << optimizer.solution().value << std::endl;
+		}
+	} while( !criterion.stop( optimizer.solution() ) );
+}
+
 template <class TInputValue, class NeuronType>
 bool AutoencoderModel<TInputValue,NeuronType>::CanReadFile(const std::string & filename)
 {
@@ -396,7 +430,7 @@ void AutoencoderModel<TInputValue,NeuronType>
 	}
 	*/
 	data = m_net.evalLayer( m_net.layerMatrices().size()/2-1 ,data);   // features layer for a network containing the encoder and decoder part
-	std::cout << data.element(0) << std::endl;
+
 	unsigned int id = startIndex;
 	target.SetSize(this->m_Dimension);
 	
diff --git a/include/DimensionalityReductionModelFactory.txx b/include/DimensionalityReductionModelFactory.txx
index 42eb47e76c8be9d44e53e3032e4af7e55ddae531..fb65be910ec931cc5b86a6be1c659babe1b33ac5 100644
--- a/include/DimensionalityReductionModelFactory.txx
+++ b/include/DimensionalityReductionModelFactory.txx
@@ -46,7 +46,7 @@ using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTa
 */
 
 template <class TInputValue, class TTargetValue>
-using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::LinearNeuron>  ;
+using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TanhNeuron>;
 
 
 template <class TInputValue, class TTargetValue>
diff --git a/include/cbLearningApplicationBaseDR.h b/include/cbLearningApplicationBaseDR.h
index 5814c3f3b555fd019a1d7c9da486b0fe8747ec79..084db66a4caab98a4dfffc51745c099c4dd16c90 100644
--- a/include/cbLearningApplicationBaseDR.h
+++ b/include/cbLearningApplicationBaseDR.h
@@ -103,7 +103,7 @@ public:
 #ifdef OTB_USE_SHARK
 
 	// typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
-	typedef shark::LinearNeuron NeuronType;
+	typedef shark::TanhNeuron NeuronType;
 	typedef otb::AutoencoderModel<InputValueType, NeuronType> AutoencoderModelType;
 	/*
 	// typedef shark::TiedAutoencoder< shark::TanhNeuron, shark::LinearNeuron> TiedAutoencoderType;
diff --git a/include/cbTrainAutoencoder.txx b/include/cbTrainAutoencoder.txx
index b44f99461a8f29ae072973012d5f1a7d0ad356d0..57d6b8614426fbd659db5b709ce9e1eb690eef40 100644
--- a/include/cbTrainAutoencoder.txx
+++ b/include/cbTrainAutoencoder.txx
@@ -143,7 +143,6 @@ void cbLearningApplicationBaseDR<TInputValue,TOutputValue>
 			rho[i]=std::stof(s_rho[i]);
 			beta[i]=std::stof(s_beta[i]);
 		}
-		std::cout << nb_neuron << std::endl;
 		dimredTrainer->SetNumberOfHiddenNeurons(nb_neuron);
 		dimredTrainer->SetNumberOfIterations(GetParameterInt("model.autoencoder.nbiter"));
 		dimredTrainer->SetEpsilon(GetParameterFloat("model.autoencoder.epsilon"));