diff --git a/include/AutoencoderModel.txx b/include/AutoencoderModel.txx
index 847c096b59f6d512b6637a5bbf5942cf56446e9a..7103ec98eba48f7261fd068507af384f792c4744 100644
--- a/include/AutoencoderModel.txx
+++ b/include/AutoencoderModel.txx
@@ -100,7 +100,8 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::Abstrac
 
 	std::size_t inputs = dataDimension(samples);
 	net.setStructure(inputs, nbneuron);
-	initRandomUniform(net,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
+	//initRandomUniform(net,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
+	initRandomUniform(net,-1,1);
 	shark::ImpulseNoiseModel noise(noise_strength,0.0); //set an input pixel with probability m_Noise to 0
 	shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
 	shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
@@ -145,8 +146,8 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(shark::A
 
 	std::size_t inputs = dataDimension(samples);
 	net.setStructure(inputs, nbneuron);
-	initRandomUniform(net,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
-
+	//initRandomUniform(net,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
+	initRandomUniform(net,-1,1);
 	shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
 	shark::SquaredLoss<shark::RealVector> loss;
 	shark::SparseAutoencoderError error(trainSet,&net, &loss, rho, beta);
@@ -219,7 +220,33 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Save(const std::string & fil
 			otxt << m_net[i].encoderMatrix() << std::endl;
 			otxt << m_net[i].hiddenBias() << std::endl;
 		}
+
+		// Evaluate the reconstruction error of the trained stack on the training samples:
+		// encode through every layer, then decode back from the innermost layer.
+		std::vector<shark::RealVector> features;
+		shark::SquaredLoss<shark::RealVector> loss;
+		Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+		shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
+		shark::Data<shark::RealVector> outputSamples = inputSamples;
+
+		for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+		{
+			outputSamples = m_net[i].encode(outputSamples);
+		}
+
+		for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
+		{
+			outputSamples = m_net[m_NumberOfHiddenNeurons.Size()-i-1].decode(outputSamples); // decode starting from the innermost (smallest) layer
+		}
+
+		double reconstructionError = loss.eval(inputSamples,outputSamples); // the mean squared error is returned
+		otxt << "Reconstruction error : " << reconstructionError << std::endl;
+		std::cout << "Reconstruction error : " << reconstructionError << std::endl;
+		// Print the first sample and its reconstruction for a quick sanity check
+		std::cout << "in  " << inputSamples.element(0) << std::endl;
+		std::cout << "out " << outputSamples.element(0) << std::endl;
+
 		otxt.close();
 	}
 	
 	
diff --git a/include/DimensionalityReductionModelFactory.txx b/include/DimensionalityReductionModelFactory.txx
index a4cb4b87ed472c7e6a4e994962d461459d2c8327..23fcadddde79b3bbdf2973351f844523ca1b180d 100644
--- a/include/DimensionalityReductionModelFactory.txx
+++ b/include/DimensionalityReductionModelFactory.txx
@@ -35,11 +35,13 @@ namespace otb
 {
 
 template <class TInputValue, class TTargetValue>
-using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::Autoencoder<shark::LogisticNeuron, shark::LogisticNeuron>>  ;
+// using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::Autoencoder<shark::TanhNeuron, shark::LinearNeuron>>  ;
+using AutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::Autoencoder<shark::TanhNeuron, shark::TanhNeuron>>  ;
 
 
 template <class TInputValue, class TTargetValue>
-using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TiedAutoencoder< shark::LogisticNeuron, shark::LogisticNeuron>>  ;
+// using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TiedAutoencoder< shark::TanhNeuron, shark::LinearNeuron>>  ;
+using TiedAutoencoderModelFactory = AutoencoderModelFactoryBase<TInputValue, TTargetValue, shark::TiedAutoencoder< shark::TanhNeuron, shark::TanhNeuron>>  ;
 
 
 template <class TInputValue, class TTargetValue>
diff --git a/include/ImageDimensionalityReductionFilter.txx b/include/ImageDimensionalityReductionFilter.txx
index 4eee4d3c9f45aeba1b76c525094429a4baef2467..53a1739bd28fed73fe3f0623f1018dd12e124d56 100644
--- a/include/ImageDimensionalityReductionFilter.txx
+++ b/include/ImageDimensionalityReductionFilter.txx
@@ -134,7 +134,6 @@ void ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>::
 {
 	Superclass::GenerateOutputInformation();
     this->GetOutput()->SetNumberOfComponentsPerPixel( m_Model->GetDimension() );
-    std::cout << m_Model->GetDimension() << std::endl;
 }
 
 
diff --git a/include/PCAModel.txx b/include/PCAModel.txx
index dab5b6483a010da8f5498316e7a1f326db83cc32..e9ea1e2a2b94d3cc1331b65a2123f340087943f6 100644
--- a/include/PCAModel.txx
+++ b/include/PCAModel.txx
@@ -12,6 +12,8 @@
 #include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
 #include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
 
+#include <shark/ObjectiveFunctions/ErrorFunction.h>
+
 namespace otb
 {
 
@@ -83,9 +85,16 @@ void PCAModel<TInputValue>::Save(const std::string & filename, const std::string
 	{
 		std::ofstream otxt(filename+".txt");
 		
-		otxt << m_pca.eigenvectors() << std::endl;
-		otxt << m_pca.eigenvalues() << std::endl;
+		otxt << "Eigenvectors : " << m_pca.eigenvectors() << std::endl;
+		otxt << "Eigenvalues : " << m_pca.eigenvalues() << std::endl;
 		
+		std::vector<shark::RealVector> features;
+		shark::SquaredLoss<shark::RealVector> loss;
+		Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
+		shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
+		double reconstructionError = loss.eval(inputSamples, m_decoder(m_encoder(inputSamples))); // mean squared reconstruction error on the training samples
+		otxt << "Reconstruction error : " << reconstructionError << std::endl;
+		std::cout << "Reconstruction error : " << reconstructionError << std::endl;
 		otxt.close();
 	}
 }
@@ -109,16 +118,13 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
 	{
 		this->m_Dimension = m_encoder.outputSize();
 	}
-	else
-	{
-		std::cout << "yo" << std::endl;
-	}
+	
 	
 	auto eigenvectors = m_encoder.matrix();
 	eigenvectors.resize(this->m_Dimension,m_encoder.inputSize());
+	
 	m_encoder.setStructure(eigenvectors, m_encoder.offset() );
-	std::cout << m_encoder.matrix() << "end" << std::endl;
-	//this->m_Size = m_NumberOfHiddenNeurons;
+
 	
 	
 }
diff --git a/include/cbLearningApplicationBaseDR.h b/include/cbLearningApplicationBaseDR.h
index 3d90432fa95899e21de67a0695f6067dc6d81502..6207527baadd28134925ab380ea80401ca3466c3 100644
--- a/include/cbLearningApplicationBaseDR.h
+++ b/include/cbLearningApplicationBaseDR.h
@@ -101,10 +101,13 @@ public:
 	
 
 #ifdef OTB_USE_SHARK
-	typedef shark::Autoencoder< shark::LogisticNeuron, shark::LogisticNeuron> AutoencoderType;
+
+	// typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
+	typedef shark::Autoencoder< shark::TanhNeuron, shark::TanhNeuron> AutoencoderType;
 	typedef otb::AutoencoderModel<InputValueType, AutoencoderType> AutoencoderModelType;
 	
-	typedef shark::TiedAutoencoder< shark::LogisticNeuron, shark::LogisticNeuron> TiedAutoencoderType;
+	// typedef shark::TiedAutoencoder< shark::TanhNeuron, shark::LinearNeuron> TiedAutoencoderType;
+	typedef shark::TiedAutoencoder< shark::TanhNeuron, shark::TanhNeuron> TiedAutoencoderType;
 	typedef otb::AutoencoderModel<InputValueType, TiedAutoencoderType> TiedAutoencoderModelType;
 	
 	typedef otb::PCAModel<InputValueType> PCAModelType;