From 2c525d9391ef076ec440b070bd8678f3b3f81808 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Traizet?= <traizetc@cesbio.cnes.fr>
Date: Tue, 27 Jun 2017 14:50:39 +0200
Subject: [PATCH] Training an autoencoder now outputs a learning curve (txt
 file), work in progress

---
 include/AutoencoderModel.h   | 10 ++++++----
 include/AutoencoderModel.txx | 21 +++++++++++++++------
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/include/AutoencoderModel.h b/include/AutoencoderModel.h
index 72dd370568..c6038427d2 100644
--- a/include/AutoencoderModel.h
+++ b/include/AutoencoderModel.h
@@ -3,7 +3,7 @@
 
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
-
+#include <fstream>
 #include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
 
 namespace otb
@@ -68,10 +68,10 @@ public:
   void Train() ITK_OVERRIDE;
 
   template <class T>
-  void TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double, shark::Data<shark::RealVector> &);
+  void TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double, shark::Data<shark::RealVector> &, std::ostream&);
 
   template <class T>
-  void TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double,double, shark::Data<shark::RealVector> &);
+  void TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int,double, double,double, shark::Data<shark::RealVector> &, std::ostream&);
 
 protected:
   AutoencoderModel();
@@ -85,7 +85,9 @@ private:
 
   /** Network attributes */
   std::vector<AutoencoderType> m_net;
-
+
+  typename InputListSampleType::Pointer m_ValidationListSample; // This list can optionally be used during training to prevent overfitting.
+
   itk::Array<unsigned int> m_NumberOfHiddenNeurons;
   /** Training parameters */
diff --git a/include/AutoencoderModel.txx b/include/AutoencoderModel.txx
index fd71a73452..ecd39a468f 100644
--- a/include/AutoencoderModel.txx
+++ b/include/AutoencoderModel.txx
@@ -43,6 +43,9 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Train()
 
   shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
 
+  std::ofstream ofs("/mnt/data/home/traizetc/computation/learning_curve.txt"); //learning curve
+  ofs << "learning curve" << std::endl;
+
   if (m_Epsilon > 0){
     shark::TrainingProgress<> criterion(5,m_Epsilon);
 
@@ -50,11 +53,11 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Train()
     {
       if (m_Noise[i] != 0) // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
       {
-        TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples);
+        TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples,ofs);
       }
       else
       {
-        TrainOneSparseLayer( criterion,m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i], inputSamples);
+        TrainOneSparseLayer( criterion,m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i],inputSamples, ofs);
       }
       criterion.reset();
     }
@@ -68,11 +71,11 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Train()
     {
      if (m_Noise[i] != 0) // Shark doesn't allow to train a layer using a sparsity term AND a noisy input. (shark::SparseAutoencoderError takes an autoen
       {
-        TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples);
+        TrainOneLayer(criterion, m_NumberOfHiddenNeurons[i],m_Noise[i],m_Regularization[i], inputSamples, ofs);
       }
       else
       {
-        TrainOneSparseLayer(criterion, m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i], inputSamples);
+        TrainOneSparseLayer(criterion, m_NumberOfHiddenNeurons[i],m_Rho[i],m_Beta[i],m_Regularization[i], inputSamples, ofs);
       }
       criterion.reset();
     }
@@ -82,7 +85,7 @@ void AutoencoderModel<TInputValue,AutoencoderType>::Train()
 
 template <class TInputValue, class AutoencoderType>
 template <class T>
-void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double noise_strength,double regularization, shark::Data<shark::RealVector> &samples)
+void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double noise_strength,double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
   AutoencoderType net;
@@ -103,10 +106,15 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::Abstrac
   optimizer.init(error);
   std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
+
+  File << "end layer" << std::endl;
+
+
   unsigned int i=0;
   do{
     i++;
     optimizer.step(error);
+    File << optimizer.solution().value << std::endl;
   } while( !criterion.stop( optimizer.solution() ) );
   std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
@@ -118,7 +126,7 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneLayer(shark::Abstrac
 
 template <class TInputValue, class AutoencoderType>
 template <class T>
-void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples)
+void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(shark::AbstractStoppingCriterion<T> & criterion, unsigned int nbneuron,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
   AutoencoderType net;
@@ -141,6 +149,7 @@ void AutoencoderModel<TInputValue,AutoencoderType>::TrainOneSparseLayer(shark::A
   do{
     i++;
     optimizer.step(error);
+    File << optimizer.solution().value << std::endl;
   } while( !criterion.stop( optimizer.solution() ) );
   std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
-- 
GitLab
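
A minimal reader for the learning curve file written by this patch, given as a sketch rather than part of the change set. It assumes the format visible in the diff above: a "learning curve" header line, an "end layer" marker emitted by TrainOneLayer before its optimization loop, and one reconstruction error value per optimizer step. The default file name and the per-layer/per-step bookkeeping below are illustrative assumptions, since the patch hardcodes an absolute output path and is still marked work in progress.

// read_learning_curve.cxx -- sketch, not part of the patch.
// Reads the text file written by AutoencoderModel::Train() and prints
// (layer, step, error) triples suitable for plotting.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char* argv[])
{
  // Hypothetical default name; pass the real path as the first argument.
  const std::string path = (argc > 1) ? argv[1] : "learning_curve.txt";
  std::ifstream ifs(path);
  if (!ifs)
  {
    std::cerr << "Cannot open " << path << std::endl;
    return 1;
  }

  std::string line;
  unsigned int layer = 0;
  unsigned int step = 0;
  while (std::getline(ifs, line))
  {
    if (line == "learning curve")
    {
      continue; // header written once at the start of Train()
    }
    if (line == "end layer")
    {
      ++layer;  // marker written by TrainOneLayer before its optimization loop
      step = 0;
      continue;
    }
    std::istringstream iss(line);
    double error = 0.;
    if (iss >> error) // one reconstruction error per optimizer step
    {
      std::cout << "layer " << layer << " step " << ++step << " error " << error << std::endl;
    }
  }
  return 0;
}

Compiled standalone, it can be pointed at the path hardcoded above, e.g. ./read_learning_curve /mnt/data/home/traizetc/computation/learning_curve.txt, to dump the curve as plain columns for plotting.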