Commit 7187352c authored by Cédric Traizet

Add the missing file from the last commit

parent 25e4c6a2
Merge request !4: Dimensionality reduction algorithms
OTB_CREATE_APPLICATION(
NAME CbDimensionalityReductionTrainer
SOURCES cbDimensionalityReductionTrainer.cxx
LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${OTBCommon_LIBRARIES} ${OTBITK_LIBRARIES} ${OTBBoost_LIBRARIES} ${OTBShark_LIBRARIES}
)
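# Note: Shark and Boost are listed explicitly in LINK_LIBRARIES above, presumably because
# the trainer calls the Shark training code directly and serializes its models with boost::archive.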
OTB_CREATE_APPLICATION(
......
#include "otbWrapperApplication.h"
#include "otbWrapperApplicationFactory.h"
#include "otbWrapperChoiceParameter.h"
#include <iostream>
#include "otbImage.h"
#include "otbVectorImage.h"
#include <shark/Models/Autoencoder.h>//normal autoencoder model
#include <shark/Models/TiedAutoencoder.h>//autoencoder with tied weights
#include <shark/Models/Normalizer.h>
#include "encode_filter.h"
#include "dummy_filter.h"
namespace otb
{
@@ -31,6 +28,11 @@ public:
typedef itk::SmartPointer<const Self> ConstPointer;
using image_type = FloatVectorImageType;
typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
using FilterType = EncodeFilter<image_type, AutoencoderType, shark::Normalizer<shark::RealVector>> ;
/** Standard macro */
itkNewMacro(Self);
itkTypeMacro(CbDimensionalityReduction, otb::Application);
@@ -62,36 +64,33 @@ private:
void DoExecute()
{
typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
using image_type = FloatVectorImageType;
using FilterType = EncodeFilter<image_type, AutoencoderType, shark::Normalizer<shark::RealVector>> ;
using DummyFilterType = DummyFilter<image_type> ;
std::cout << "Appli !" << std::endl;
std::cout << "Appli" << std::endl;
FloatVectorImageType::Pointer inImage = GetParameterImage("in");
std::string encoderPath = GetParameterString("model");
std::string normalizerPath = GetParameterString("normalizer");
filter = FilterType::New();
filter->SetAutoencoderModel(encoderPath);
filter->SetNormalizerModel(normalizerPath);
filter->SetInput(inImage);
//filter->Update();
SetParameterOutputImage("out", filter->GetOutput());
/*
using DummyFilterType = DummyFilter<image_type> ;
DummyFilterType::Pointer dummy_filter = DummyFilterType::New(); // this filter simply copies the input image (do not need shark library)
dummy_filter->SetInput(GetParameterFloatVectorImage("in"));
dummy_filter->Update();
dummy_filter->UpdateOutputInformation();
SetParameterOutputImage("out", dummy_filter->GetOutput());
*/
//SetParameterOutputImage("out", inImage); // copy input image
}
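// The encoding filter is kept as a class member (declared below) so that the
// mini-pipeline built in DoExecute() stays alive until the framework writes the
// output image; this is presumably why the explicit Update() call above is commented out.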
FilterType::Pointer filter;
};
}
}
......
#include "otbWrapperApplication.h"
#include "otbWrapperApplicationFactory.h"
#include "otbWrapperChoiceParameter.h"
#include "otbOGRDataSourceWrapper.h"
#include "otbOGRFeatureWrapper.h"
#include "itkVariableLengthVector.h"
#include "AutoencoderModel.h"
#include "otbSharkUtils.h"
//include train function
#include <shark/ObjectiveFunctions/ErrorFunction.h>
#include <shark/Algorithms/GradientDescent/Rprop.h>// the RProp optimization algorithm
#include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
#include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
#include <fstream> // write the model file
#include <shark/Models/Autoencoder.h>//normal autoencoder model
#include <shark/Models/TiedAutoencoder.h>//autoencoder with tied weights
#include <shark/Models/Normalizer.h>
#include <shark/Algorithms/Trainers/NormalizeComponentsUnitVariance.h>
template<class AutoencoderModel>
AutoencoderModel trainAutoencoderModel(
shark::UnlabeledData<shark::RealVector> const& data,//the data to train with
std::size_t numHidden,//number of features in the autoencoder
std::size_t iterations, //number of iterations to optimize
double regularisation//strength of the regularisation
){
//create the model
std::size_t inputs = dataDimension(data);
AutoencoderModel model;
model.setStructure(inputs, numHidden);
initRandomUniform(model,-0.1*std::sqrt(1.0/inputs),0.1*std::sqrt(1.0/inputs));
//create the objective function
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(data,data);//labels identical to inputs
shark::SquaredLoss<shark::RealVector> loss;
shark::ErrorFunction error(trainSet, &model, &loss);
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
error.setRegularizer(regularisation,&regularizer);
shark::IRpropPlusFull optimizer;
error.init();
optimizer.init(error);
std::cout<<"Optimizing model: "+model.name()<<std::endl;
for(std::size_t i = 0; i != iterations; ++i){
optimizer.step(error);
std::cout<<i<<" "<<optimizer.solution().value<<std::endl;
}
//std::cout<<optimizer.solution().value<<std::endl;
model.setParameterVector(optimizer.solution().point);
return model;
}
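// Note: trainAutoencoderModel() follows the standard Shark recipe: the samples serve
// as their own targets, reconstruction is scored with a squared loss plus a two-norm
// (L2) regularizer, and the error is minimized with full-batch IRprop+ for a fixed
// number of iterations.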
shark::Normalizer<shark::RealVector> trainNormalizer(const shark::UnlabeledData<shark::RealVector>& data)
{
bool removeMean = true;
shark::Normalizer<shark::RealVector> normalizer;
shark::NormalizeComponentsUnitVariance<shark::RealVector> normalizingTrainer(removeMean);
normalizingTrainer.train(normalizer, data);
return normalizer;
}
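// Note: trainNormalizer() fits a per-component affine transform that removes the mean
// (removeMean = true) and rescales each component to unit variance; the same transform
// has to be applied to any data later pushed through the trained autoencoder.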
namespace otb
{
namespace Wrapper
@@ -15,16 +79,27 @@ public:
typedef CbDimensionalityReductionTrainer Self;
typedef itk::SmartPointer<Self> Pointer;
itkNewMacro(Self);
itkTypeMacro(CbDimensionalityReductionTrainer, otb::Application);
typedef double ValueType;
typedef itk::VariableLengthVector<ValueType> InputSampleType;
typedef itk::Statistics::ListSample<InputSampleType> ListSampleType;
typedef itk::VariableLengthVector<ValueType> MeasurementType;
private:
void DoInit()
{
SetName("CbDimensionalityReductionTrainer");
SetDescription("Trainer for the dimensionality reduction algorithms used in the cbDimensionalityReduction application.");
AddParameter(ParameterType_InputVectorData, "train", "Name of the input training vector data");
SetParameterDescription("train","The vector data used for training.");
AddParameter(ParameterType_StringList, "feat", "Field names to be calculated."); //
SetParameterDescription("feat","List of field names in the input vector data used as features for training."); //
/*
AddParameter(ParameterType_InputVectorData, "val", "Name of the input validation vector data");
SetParameterDescription("val","The vector data used for validation.");
@@ -38,14 +113,60 @@ private:
void DoExecute()
{
std::cout << "Appli !" << std::endl;
std::string shapefile = GetParameterString("train");
otb::ogr::DataSource::Pointer source = otb::ogr::DataSource::New(shapefile, otb::ogr::DataSource::Modes::Read);
otb::ogr::Layer layer = source->GetLayer(0);
ListSampleType::Pointer input = ListSampleType::New();
const int nbFeatures = GetParameterStringList("feat").size();
input->SetMeasurementVectorSize(nbFeatures);
otb::ogr::Layer::const_iterator it = layer.cbegin();
otb::ogr::Layer::const_iterator itEnd = layer.cend();
for( ; it!=itEnd ; ++it)
{
MeasurementType mv;
mv.SetSize(nbFeatures);
for(int idx=0; idx < nbFeatures; ++idx)
{
mv[idx] = (*it)[GetParameterStringList("feat")[idx]].GetValue<double>();
}
input->PushBack(mv);
}
std::cout << input << std::endl;
std::vector<shark::RealVector> features;
otb::Shark::ListSampleToSharkVector<ListSampleType>( input, features);
shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange( features );
std::size_t numHidden = 5; // hard-coded for now (could be parsed from a string parameter with std::stoi)
std::size_t iterations = 10;
double regularisation = 0; // hard-coded for now (could be parsed from a string parameter with std::stod)
shark::Normalizer<shark::RealVector> normalizer = trainNormalizer(inputSamples);
inputSamples = normalizer(inputSamples);
std::cout << "normalizer trained and training set normalized" << std::endl;
typedef shark::Autoencoder< shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;
AutoencoderType net = trainAutoencoderModel<AutoencoderType>(inputSamples,numHidden,iterations,regularisation);
std::cout << "autoencoder trained !!" << std::endl;
// save the model to the file "net.model"
std::ofstream ofs("net.model");
boost::archive::polymorphic_text_oarchive oa(ofs);
net.write(oa);
ofs.close();
// save the normalizer to the file "normalizer.model"
std::ofstream norm_ofs("normalizer.model");
boost::archive::polymorphic_text_oarchive onorm(norm_ofs);
normalizer.write(onorm);
norm_ofs.close();
}
};
......
@@ -6,6 +6,7 @@ otb_module(CbDimensionalityReduction
OTBITK
OTBShark
OTBBoost
OTBSupervised
DESCRIPTION
"${DOCUMENTATION}"
)
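For reference, the following is a minimal, illustrative sketch (not part of this commit) of how the two files written by the trainer, "net.model" and "normalizer.model", could be read back. It assumes that the Shark models expose a read() counterpart to the write() calls used in DoExecute(), and that the same Boost polymorphic text archive format is used on both sides:

#include <fstream>
#include <boost/archive/polymorphic_text_iarchive.hpp>
#include <shark/Models/Autoencoder.h>
#include <shark/Models/Normalizer.h>

typedef shark::Autoencoder<shark::TanhNeuron, shark::LinearNeuron> AutoencoderType;

int main()
{
  // Reload the autoencoder serialized by CbDimensionalityReductionTrainer.
  AutoencoderType net;
  std::ifstream ifs("net.model");
  boost::archive::polymorphic_text_iarchive ia(ifs);
  net.read(ia);
  ifs.close();

  // Reload the matching normalizer; it must be applied to new samples before encoding.
  shark::Normalizer<shark::RealVector> normalizer;
  std::ifstream norm_ifs("normalizer.model");
  boost::archive::polymorphic_text_iarchive inorm(norm_ifs);
  normalizer.read(inorm);
  norm_ifs.close();

  return 0;
}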