Commit 81bed5ae authored by Victor Poughon

Merge remote-tracking branch 'origin/revert_shark' into release-6.6

parents f3749c05 b68d86d8
@@ -291,15 +291,12 @@ protected:
itkExceptionMacro(<< "File : " << modelFileName << " couldn't be opened");
}
// get the line with the centroids (starts with "2 ")
// get the end line with the centroids
std::string line, centroidLine;
while(std::getline(infile,line))
{
if (line.size() > 2 && line[0] == '2' && line[1] == ' ')
{
if (!line.empty())
centroidLine = line;
break;
}
}
std::vector<std::string> centroidElm;
@@ -33,9 +33,8 @@
#endif
#include "otb_shark.h"
#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
#include <shark/Models/LinearModel.h>
#include <shark/Models/ConcatenatedModel.h>
#include <shark/Models/NeuronLayers.h>
#include <shark/Models/FFNet.h>
#include <shark/Models/Autoencoder.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
@@ -77,9 +76,9 @@ public:
typedef typename Superclass::ConfidenceListSampleType ConfidenceListSampleType;
/// Neural network related typedefs
typedef shark::ConcatenatedModel<shark::RealVector> ModelType;
typedef shark::LinearModel<shark::RealVector,NeuronType> LayerType;
typedef shark::LinearModel<shark::RealVector, shark::LinearNeuron> OutLayerType;
typedef shark::Autoencoder<NeuronType,shark::LinearNeuron> OutAutoencoderType;
typedef shark::Autoencoder<NeuronType,NeuronType> AutoencoderType;
typedef shark::FFNet<NeuronType,shark::LinearNeuron> NetworkType;
itkNewMacro(Self);
itkTypeMacro(AutoencoderModel, DimensionalityReductionModel);
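For orientation, the Shark 3.x types being restored here compose as follows: each layer pair is trained as a shark::Autoencoder, then its weights are copied into one symmetric shark::FFNet (see TrainOneLayer further down). A minimal sketch under that assumption, with hypothetical dimensions and a logistic NeuronType:
// Minimal sketch (assumes Shark 3.1; inputDim/hiddenDim are hypothetical).
typedef shark::LogisticNeuron NeuronType;
typedef shark::Autoencoder<NeuronType, shark::LinearNeuron> OutAutoencoderType;
typedef shark::FFNet<NeuronType, shark::LinearNeuron> NetworkType;
std::size_t inputDim = 10, hiddenDim = 4;
OutAutoencoderType ae;
ae.setStructure(inputDim, hiddenDim);                 // visible -> hidden -> visible
shark::initRandomUniform(ae, -0.1, 0.1);              // small random weights
NetworkType net;
net.setStructure(std::vector<std::size_t>{inputDim, hiddenDim, inputDim});
net.setLayer(0, ae.encoderMatrix(), ae.hiddenBias()); // copy trained encoder
net.setLayer(1, ae.decoderMatrix(), ae.outputBias()); // copy trained decoder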
@@ -128,16 +127,18 @@ public:
void Train() override;
template <class T>
template <class T, class Autoencoder>
void TrainOneLayer(
shark::AbstractStoppingCriterion<T> & criterion,
Autoencoder &,
unsigned int,
shark::Data<shark::RealVector> &,
std::ostream&);
template <class T>
template <class T, class Autoencoder>
void TrainOneSparseLayer(
shark::AbstractStoppingCriterion<T> & criterion,
Autoencoder &,
unsigned int,
shark::Data<shark::RealVector> &,
std::ostream&);
@@ -165,9 +166,7 @@ protected:
private:
/** Internal Network */
ModelType m_Encoder;
std::vector<LayerType> m_InLayers;
OutLayerType m_OutLayer;
NetworkType m_Net;
itk::Array<unsigned int> m_NumberOfHiddenNeurons;
/** Training parameters */
unsigned int m_NumberOfIterations; // stop the training after a fixed number of iterations
@@ -34,17 +34,18 @@
#include "otbSharkUtils.h"
//include train function
#include <shark/ObjectiveFunctions/ErrorFunction.h>
//~ #include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
#include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
#include <shark/Algorithms/GradientDescent/Rprop.h>// the RProp optimization algorithm
#include <shark/ObjectiveFunctions/Loss/SquaredLoss.h> // squared loss used for regression
#include <shark/ObjectiveFunctions/Regularizer.h> //L2 regularisation
//~ #include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
#include <shark/Models/ImpulseNoiseModel.h> //noise source to corrupt the inputs
#include <shark/Models/ConcatenatedModel.h>//to concatenate the noise with the model
#include <shark/Algorithms/StoppingCriteria/MaxIterations.h> //A simple stopping criterion that stops after a fixed number of iterations
#include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge; tracks the progress of the training error over a period of time
#include <shark/Algorithms/GradientDescent/Adam.h>
#include <shark/Algorithms/GradientDescent/SteepestDescent.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
@@ -82,56 +83,96 @@ AutoencoderModel<TInputValue,NeuronType>
}
// Initialization of the feed forward neural network
m_Encoder = ModelType();
m_InLayers.clear();
size_t previousShape = shark::dataDimension(inputSamples);
std::vector<size_t> layers;
layers.push_back(shark::dataDimension(inputSamples));
for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
{
m_InLayers.push_back( LayerType(previousShape, m_NumberOfHiddenNeurons[i]) );
previousShape = m_NumberOfHiddenNeurons[i];
m_Encoder.add(&(m_InLayers.back()), true);
layers.push_back(m_NumberOfHiddenNeurons[i]);
}
for (unsigned int i = std::max(0,static_cast<int>(m_NumberOfHiddenNeurons.Size()-1)) ; i > 0; --i)
{
m_InLayers.push_back( LayerType(previousShape, m_NumberOfHiddenNeurons[i-1]) );
previousShape = m_NumberOfHiddenNeurons[i-1];
layers.push_back(m_NumberOfHiddenNeurons[i-1]);
}
m_OutLayer = OutLayerType(previousShape, shark::dataDimension(inputSamples));
// Training of the autoencoders pairwise, starting from the first and last layers
for (unsigned int i = 0 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
layers.push_back(shark::dataDimension(inputSamples));
m_Net.setStructure(layers);
shark::initRandomNormal(m_Net,0.1);
// Training of the first Autoencoder (first and last layer of the FF network)
if (m_Epsilon > 0)
{
shark::TrainingProgress<> criterion(5,m_Epsilon);
OutAutoencoderType net;
// Shark doesn't allow training a layer with both a sparsity term AND a noisy input.
if (m_Noise[0] != 0)
{
TrainOneLayer(criterion, net, 0, inputSamples, ofs);
}
else
{
TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
}
criterion.reset();
}
else
{
if (m_Epsilon > 0)
shark::MaxIterations<> criterion(m_NumberOfIterations);
OutAutoencoderType net;
// Shark doesn't allow training a layer with both a sparsity term AND a noisy input.
if (m_Noise[0] != 0)
{
shark::TrainingProgress<> criterion(5,m_Epsilon);
TrainOneLayer(criterion, net, 0, inputSamples, ofs);
otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
}
else
{
TrainOneSparseLayer(criterion, net, 0, inputSamples, ofs);
}
criterion.reset();
}
// Training of the other autoencoders
if (m_Epsilon > 0)
{
shark::TrainingProgress<> criterion(5,m_Epsilon);
for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
{
AutoencoderType net;
// Shark doesn't allow training a layer with both a sparsity term AND a noisy input.
if (m_Noise[i] != 0)
{
TrainOneLayer(criterion, i, inputSamples, ofs);
TrainOneLayer(criterion, net, i, inputSamples, ofs);
}
else
{
TrainOneSparseLayer(criterion, i, inputSamples, ofs);
TrainOneSparseLayer(criterion, net, i, inputSamples, ofs);
}
criterion.reset();
}
else
}
else
{
shark::MaxIterations<> criterion(m_NumberOfIterations);
for (unsigned int i = 1 ; i < m_NumberOfHiddenNeurons.Size(); ++i)
{
shark::MaxIterations<> criterion(m_NumberOfIterations);
AutoencoderType net;
// Shark doesn't allow training a layer with both a sparsity term AND a noisy input.
if (m_Noise[i] != 0)
{
TrainOneLayer(criterion, i, inputSamples, ofs);
TrainOneLayer(criterion, net, i, inputSamples, ofs);
otbMsgDevMacro(<< "m_Noise " << m_Noise[0]);
}
else
{
TrainOneSparseLayer( criterion, i, inputSamples, ofs);
TrainOneSparseLayer( criterion, net, i, inputSamples, ofs);
}
criterion.reset();
}
// encode the samples with the last encoder trained
inputSamples = m_InLayers[i](inputSamples);
}
if (m_NumberOfIterationsFineTuning > 0)
{
@@ -142,37 +183,31 @@ AutoencoderModel<TInputValue,NeuronType>
}
template <class TInputValue, class NeuronType>
template <class T>
template <class T, class Autoencoder>
void
AutoencoderModel<TInputValue,NeuronType>
::TrainOneLayer(
shark::AbstractStoppingCriterion<T> & criterion,
Autoencoder & net,
unsigned int layer_index,
shark::Data<shark::RealVector> &samples,
std::ostream& File)
{
typedef shark::AbstractModel<shark::RealVector,shark::RealVector> BaseModelType;
ModelType net;
net.add(&(m_InLayers[layer_index]), true);
net.add( (layer_index ?
(BaseModelType*) &(m_InLayers[m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index]) :
(BaseModelType*) &m_OutLayer) , true);
otbMsgDevMacro(<< "Noise " << m_Noise[layer_index]);
std::size_t inputs = dataDimension(samples);
net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
//~ shark::ImpulseNoiseModel noise(inputs,m_Noise[layer_index],1.0); //set an input pixel with probability m_Noise to 0
//~ shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
shark::ImpulseNoiseModel noise(inputs,m_Noise[layer_index],1.0); //set an input pixel with probability m_Noise to 0
shark::ConcatenatedModel<shark::RealVector,shark::RealVector> model = noise>> net;
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
shark::SquaredLoss<shark::RealVector> loss;
//~ shark::ErrorFunction error(trainSet, &model, &loss);
shark::ErrorFunction<> error(trainSet, &net, &loss);
shark::ErrorFunction error(trainSet, &model, &loss);
shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
error.setRegularizer(m_Regularization[layer_index],&regularizer);
shark::Adam<> optimizer;
shark::IRpropPlusFull optimizer;
error.init();
optimizer.init(error);
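The optimization loop that follows is truncated in this hunk; condensed, the restored denoising path reads as the sketch below (names taken from the surrounding code, loop body assumed from the visible stopping test):
// Condensed sketch of the restored TrainOneLayer body.
// ImpulseNoiseModel corrupts each input with probability m_Noise[layer_index];
// operator>> chains the noise source in front of the autoencoder under training.
shark::ImpulseNoiseModel noise(inputs, m_Noise[layer_index], 1.0);
shark::ConcatenatedModel<shark::RealVector, shark::RealVector> model = noise >> net;
shark::ErrorFunction error(trainSet, &model, &loss);
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
error.setRegularizer(m_Regularization[layer_index], &regularizer);
shark::IRpropPlusFull optimizer;
error.init();
optimizer.init(error);
do
  {
  optimizer.step(error);                              // one IRprop+ update
  } while (!criterion.stop(optimizer.solution()));
net.setParameterVector(optimizer.solution().point);   // adopt the solution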
@@ -195,37 +230,35 @@ AutoencoderModel<TInputValue,NeuronType>
} while( !criterion.stop( optimizer.solution() ) );
net.setParameterVector(optimizer.solution().point);
m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias()); // Copy the encoder in the FF neural network
m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
samples = net.encode(samples);
}
template <class TInputValue, class NeuronType>
template <class T>
template <class T, class Autoencoder>
void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(
shark::AbstractStoppingCriterion<T> & criterion,
Autoencoder & net,
unsigned int layer_index,
shark::Data<shark::RealVector> &samples,
std::ostream& File)
{
typedef shark::AbstractModel<shark::RealVector,shark::RealVector> BaseModelType;
ModelType net;
net.add(&(m_InLayers[layer_index]), true);
net.add( (layer_index ?
(BaseModelType*) &(m_InLayers[m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index]) :
(BaseModelType*) &m_OutLayer) , true);
//AutoencoderType net;
std::size_t inputs = dataDimension(samples);
net.setStructure(inputs, m_NumberOfHiddenNeurons[layer_index]);
shark::initRandomUniform(net,-m_InitFactor*std::sqrt(1.0/inputs),m_InitFactor*std::sqrt(1.0/inputs));
// Idea: set the initial values for the output weights higher than the input weights
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
shark::SquaredLoss<shark::RealVector> loss;
//~ shark::SparseAutoencoderError error(trainSet,&net, &loss, m_Rho[layer_index], m_Beta[layer_index]);
// SparseAutoencoderError doesn't exist anymore, for now use a plain ErrorFunction
shark::ErrorFunction<> error(trainSet, &net, &loss);
shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
shark::SparseAutoencoderError error(trainSet,&net, &loss, m_Rho[layer_index], m_Beta[layer_index]);
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
error.setRegularizer(m_Regularization[layer_index],&regularizer);
shark::Adam<> optimizer;
shark::IRpropPlusFull optimizer;
error.init();
optimizer.init(error);
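The SparseAutoencoderError restored above differs from a plain ErrorFunction by adding a sparsity penalty on the hidden activations; in the standard sparse-autoencoder formulation (stated here for reference, not taken from this diff):
// penalty = beta * sum_j [ rho*log(rho/rho_hat_j)
//                        + (1-rho)*log((1-rho)/(1-rho_hat_j)) ]
// where rho_hat_j is the mean activation of hidden unit j over the samples,
// rho = m_Rho[layer_index] is the target rate and beta = m_Beta[layer_index]
// weights the penalty against the squared reconstruction loss.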
@@ -246,6 +279,9 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(
File << "end layer" << std::endl;
}
net.setParameterVector(optimizer.solution().point);
m_Net.setLayer(layer_index,net.encoderMatrix(),net.hiddenBias()); // Copy the encoder in the FF neural network
m_Net.setLayer( m_NumberOfHiddenNeurons.Size()*2 - 1 - layer_index,net.decoderMatrix(),net.outputBias()); // Copy the decoder in the FF neural network
samples = net.encode(samples);
}
template <class TInputValue, class NeuronType>
@@ -257,23 +293,15 @@ AutoencoderModel<TInputValue,NeuronType>
shark::Data<shark::RealVector> &samples,
std::ostream& File)
{
// create full network
ModelType net;
for (auto &layer : m_InLayers)
{
net.add(&layer, true);
}
net.add(&m_OutLayer, true);
//labels identical to inputs
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);
shark::SquaredLoss<shark::RealVector> loss;
shark::ErrorFunction<> error(trainSet, &net, &loss);
shark::TwoNormRegularizer<> regularizer(error.numberOfVariables());
shark::ErrorFunction error(trainSet, &m_Net, &loss);
shark::TwoNormRegularizer regularizer(error.numberOfVariables());
error.setRegularizer(m_Regularization[0],&regularizer);
shark::Adam<> optimizer;
shark::IRpropPlusFull optimizer;
error.init();
optimizer.init(error);
otbMsgDevMacro(<<"Error before training : " << optimizer.solution().value);
@@ -298,6 +326,7 @@ AutoencoderModel<TInputValue,NeuronType>
try
{
this->Load(filename);
m_Net.name();
}
catch(...)
{
@@ -321,15 +350,22 @@ AutoencoderModel<TInputValue,NeuronType>
{
otbMsgDevMacro(<< "saving model ...");
std::ofstream ofs(filename);
ofs << "Autoencoder" << std::endl; // the first line of the model file contains a key
ofs << (m_InLayers.size() + 1) << std::endl; // second line is the number of encoders/decoders
ofs << m_Net.name() << std::endl; // the first line of the model file contains a key
shark::TextOutArchive oa(ofs);
for (const auto &layer : m_InLayers)
oa << m_Net;
ofs.close();
if (this->m_WriteWeights == true) // output the map vectors in a txt file
{
oa << layer;
std::ofstream otxt(filename+".txt");
for (unsigned int i = 0 ; i < m_Net.layerMatrices().size(); ++i)
{
otxt << "layer " << i << std::endl;
otxt << m_Net.layerMatrix(i) << std::endl;
otxt << m_Net.bias(i) << std::endl;
otxt << std::endl;
}
}
oa << m_OutLayer;
ofs.close();
}
template <class TInputValue, class NeuronType>
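Save() above and Load() below are symmetric: one plain-text key line, then a single Boost text archive holding the whole FFNet. A minimal round-trip sketch under those assumptions (file name hypothetical):
// Minimal save/load round trip for the restored one-archive format.
{
std::ofstream ofs("model.txt");      // hypothetical file name
ofs << net.name() << std::endl;      // line 1: model key
shark::TextOutArchive oa(ofs);
oa << net;                           // whole network in one archive entry
}
{
std::ifstream ifs("model.txt");
std::string key;
std::getline(ifs, key);              // consume/verify the key line
shark::TextInArchive ia(ifs);
ia >> net;                           // restore the network
}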
@@ -337,39 +373,23 @@ void
AutoencoderModel<TInputValue,NeuronType>
::Load(const std::string & filename, const std::string & /*name*/)
{
NetworkType net;
std::ifstream ifs(filename);
char buffer[256];
// check first line
ifs.getline(buffer,256);
std::string bufferStr(buffer);
if (bufferStr != "Autoencoder"){
char autoencoder[256];
ifs.getline(autoencoder,256);
std::string autoencoderstr(autoencoder);
if (autoencoderstr != net.name()){
itkExceptionMacro(<< "Error opening " << filename.c_str() );
}
// check second line
ifs.getline(buffer,256);
int nbLevels = boost::lexical_cast<int>(buffer);
if (nbLevels < 2 || nbLevels%2 == 1)
{
itkExceptionMacro(<< "Unexpected number of levels : "<<buffer );
}
m_InLayers.clear();
m_Encoder = ModelType();
shark::TextInArchive ia(ifs);
for (int i=0 ; (i+1) < nbLevels ; i++)
{
LayerType layer;
ia >> layer;
m_InLayers.push_back(layer);
}
ia >> m_OutLayer;
ia >> m_Net;
ifs.close();
for (int i=0 ; i < nbLevels/2 ; i++)
{
m_Encoder.add(&(m_InLayers[i]) ,true);
}
this->SetDimension( m_Encoder.outputShape()[0] );
// This gives us the dimension if we keep the encoder and decoder
size_t feature_layer_index = m_Net.layerMatrices().size()/2;
// number of neurons in the feature layer (second dimension of the first decoder weight matrix)
this->SetDimension(m_Net.layerMatrix(feature_layer_index).size2());
}
template <class TInputValue, class NeuronType>
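To see why layerMatrices().size()/2 lands on the feature layer: Train() builds the symmetric layer list {d, h1, ..., hk, ..., h1, d}, so a network with k hidden sizes holds 2k weight matrices; matrix k is the first decoder matrix and its column count (size2()) is the bottleneck width. A hypothetical worked instance:
// Hypothetical worked example: d = 10 inputs, hidden sizes {8, 4}.
// layers = {10, 8, 4, 8, 10} -> four weight matrices:
//   M0: 8x10 (encoder 1), M1: 4x8 (encoder 2),
//   M2: 8x4  (decoder 1), M3: 10x8 (decoder 2)
std::size_t feature_layer_index = net.layerMatrices().size() / 2;  // == 2
std::size_t dim = net.layerMatrix(feature_layer_index).size2();    // == 4
DoPredict below evaluates up to layer index size()/2 - 1 for the same reason: the output of matrix k-1 is the encoded feature vector.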
@@ -389,7 +409,7 @@ AutoencoderModel<TInputValue,NeuronType>
shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
// features layer for a network containing the encoder and decoder part
data = m_Encoder(data);
data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
TargetSampleType target;
target.SetSize(this->m_Dimension);
@@ -415,7 +435,7 @@ AutoencoderModel<TInputValue,NeuronType>
shark::Data<shark::RealVector> data = shark::createDataFromRange(features);
TargetSampleType target;
// features layer for a network containing the encoder and decoder part
data = m_Encoder(data);
data = m_Net.evalLayer( m_Net.layerMatrices().size()/2-1 ,data);
unsigned int id = startIndex;
target.SetSize(this->m_Dimension);
@@ -137,11 +137,11 @@ PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*
ifs.close();
if (this->m_Dimension ==0)
{
this->m_Dimension = m_Encoder.outputShape()[0];
this->m_Dimension = m_Encoder.outputSize();
}
auto eigenvectors = m_Encoder.matrix();
eigenvectors.resize(this->m_Dimension,m_Encoder.inputShape()[0]);
eigenvectors.resize(this->m_Dimension,m_Encoder.inputSize());
m_Encoder.setStructure(eigenvectors, m_Encoder.offset() );
}
@@ -28,11 +28,7 @@ otb_module(OTBLearningBase
OTBImageBase
OTBITK
OPTIONAL_DEPENDS
OTBShark
TEST_DEPENDS
OTBBoost
TEST_DEPENDS
OTBTestKernel
OTBImageIO
@@ -32,10 +32,6 @@ otbKMeansImageClassificationFilterNew.cxx
otbMachineLearningModelTemplates.cxx
)
if(OTB_USE_SHARK)
set(OTBLearningBaseTests ${OTBLearningBaseTests} otbSharkUtilsTests.cxx)
endif()
add_executable(otbLearningBaseTestDriver ${OTBLearningBaseTests})
target_link_libraries(otbLearningBaseTestDriver ${OTBLearningBase-Test_LIBRARIES})
otb_module_target_label(otbLearningBaseTestDriver)
@@ -72,7 +68,3 @@ otb_add_test(NAME leTuDecisionTreeNew COMMAND otbLearningBaseTestDriver
otb_add_test(NAME leTuKMeansImageClassificationFilterNew COMMAND otbLearningBaseTestDriver
otbKMeansImageClassificationFilterNew)
if(OTB_USE_SHARK)
otb_add_test(NAME leTuSharkNormalizeLabels COMMAND otbLearningBaseTestDriver
otbSharkNormalizeLabels)
endif()
@@ -29,7 +29,4 @@ void RegisterTests()
REGISTER_TEST(otbSEMClassifierNew);
REGISTER_TEST(otbDecisionTreeNew);
REGISTER_TEST(otbKMeansImageClassificationFilterNew);
#ifdef OTB_USE_SHARK
REGISTER_TEST(otbSharkNormalizeLabels);
#endif
}
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "itkMacro.h"
#include "otbSharkUtils.h"
int otbSharkNormalizeLabels(int itkNotUsed(argc), char* itkNotUsed(argv) [])
{
std::vector<unsigned int> inLabels = {2, 2, 3, 20, 1};
std::vector<unsigned int> expectedDictionary = {2, 3, 20, 1};
std::vector<unsigned int> expectedLabels = {0, 0, 1, 2, 3};
auto newLabels = inLabels;
std::vector<unsigned int> labelDict;
otb::Shark::NormalizeLabelsAndGetDictionary(newLabels, labelDict);
if(newLabels != expectedLabels)
{
std::cout << "Wrong new labels\n";
for(size_t i = 0; i<newLabels.size(); ++i)
std::cout << "Got " << newLabels[i] << " expected " << expectedLabels[i] << '\n';
return EXIT_FAILURE;
}
if(labelDict != expectedDictionary)
{
std::cout << "Wrong dictionary\n";
for(size_t i = 0; i<labelDict.size(); ++i)
std::cout << "Got " << labelDict[i] << " expected " << expectedDictionary[i] << '\n';
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
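The dictionary produced by NormalizeLabelsAndGetDictionary is what the prediction path (removed elsewhere in this merge) used to translate Shark's dense 0..N-1 class indices back to the caller's labels; with the vectors from this test:
// Sketch: inverse mapping from normalized index to original label,
// using the values exercised by the test above.
// inLabels = {2, 2, 3, 20, 1} -> labelDict = {2, 3, 20, 1}
unsigned int res = 2;                    // dense class index from Shark
unsigned int original = labelDict[res];  // == 20, the user's original label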
@@ -36,7 +36,6 @@
#pragma GCC diagnostic ignored "-Wheader-guard"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif
#include <shark/Models/Classifier.h>
#include "otb_shark.h"
#include "shark/Algorithms/Trainers/RFTrainer.h"
#if defined(__GNUC__) || defined(__clang__)
@@ -137,10 +136,6 @@ public:
/** If true, margin confidence value will be computed */
itkSetMacro(ComputeMargin, bool);
/** If true, class labels will be normalised in [0 ... nbClasses] */
itkGetMacro(NormalizeClassLabels, bool);
itkSetMacro(NormalizeClassLabels, bool);
protected:
/** Constructor */
SharkRandomForestsMachineLearningModel();
@@ -161,10 +156,8 @@ private:
SharkRandomForestsMachineLearningModel(const Self &); //purposely not implemented
void operator =(const Self&); //purposely not implemented
shark::RFClassifier<unsigned int> m_RFModel;
shark::RFTrainer<unsigned int> m_RFTrainer;
std::vector<unsigned int> m_ClassDictionary;
bool m_NormalizeClassLabels;
shark::RFClassifier m_RFModel;
shark::RFTrainer m_RFTrainer;
unsigned int m_NumberOfTrees;
unsigned int m_MTry;
@@ -32,6 +32,7 @@
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#pragma GCC diagnostic ignored "-Wignored-qualifiers"
#endif
#include <shark/Models/Converter.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
@@ -51,7 +52,6 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
this->m_ConfidenceIndex = true;
this->m_IsRegressionSupported = false;
this->m_IsDoPredictBatchMultiThreaded = true;
this->m_NormalizeClassLabels = true;
}
@@ -76,17 +76,13 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
Shark::ListSampleToSharkVector(this->GetInputListSample(), features);
Shark::ListSampleToSharkVector(this->GetTargetListSample(), class_labels);
if(m_NormalizeClassLabels)
{
Shark::NormalizeLabelsAndGetDictionary(class_labels, m_ClassDictionary);
}
shark::ClassificationDataset TrainSamples = shark::createLabeledDataFromRange(features,class_labels);
//Set parameters
m_RFTrainer.setMTry(m_MTry);
m_RFTrainer.setNTrees(m_NumberOfTrees);
m_RFTrainer.setNodeSize(m_NodeSize);
// m_RFTrainer.setOOBratio(m_OobRatio);
m_RFTrainer.setOOBratio(m_OobRatio);
m_RFTrainer.train(m_RFModel, TrainSamples);
}
@@ -129,20 +125,15 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
}
if (quality != ITK_NULLPTR)
{
shark::RealVector probas = m_RFModel.decisionFunction()(samples);
shark::RealVector probas = m_RFModel(samples);
(*quality) = ComputeConfidence(probas, m_ComputeMargin);
}
unsigned int res{0};
m_RFModel.eval(samples, res);
shark::ArgMaxConverter<shark::RFClassifier> amc;
amc.decisionFunction() = m_RFModel;
unsigned int res;
amc.eval(samples, res);
TargetSampleType target;
if(m_NormalizeClassLabels)
{
target[0] = m_ClassDictionary[static_cast<TOutputValue>(res)];
}
else
{
target[0] = static_cast<TOutputValue>(res);
}
target[0] = static_cast<TOutputValue>(res);
return target;
}
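In the Shark 3 API restored by this hunk, RFClassifier maps a sample to a vector of class probabilities, and ArgMaxConverter wraps the forest so eval() yields the winning class index directly. A minimal sketch using only calls visible above:
// Sketch: probability output vs. arg-max class output (Shark 3 RFClassifier).
shark::RealVector probas = m_RFModel(samples);   // per-class probabilities
shark::ArgMaxConverter<shark::RFClassifier> amc;
amc.decisionFunction() = m_RFModel;              // wrap the trained forest
unsigned int res;
amc.eval(samples, res);                          // res = index of max proba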
@@ -166,13 +157,13 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
Shark::ListSampleRangeToSharkVector(input, features,startIndex,size);
shark::Data<shark::RealVector> inputSamples = shark::createDataFromRange(features);
#ifdef _OPENMP
#ifdef _OPENMP
omp_set_num_threads(itk::MultiThreader::GetGlobalDefaultNumberOfThreads());
#endif
#endif
if(quality != ITK_NULLPTR)
{
shark::Data<shark::RealVector> probas = m_RFModel.decisionFunction()(inputSamples);
shark::Data<shark::RealVector> probas = m_RFModel(inputSamples);
unsigned int id = startIndex;
for(shark::RealVector && p : probas.elements())
{
@@ -184,19 +175,14 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
}
}
auto prediction = m_RFModel(inputSamples);
shark::ArgMaxConverter<shark::RFClassifier> amc;
amc.decisionFunction() = m_RFModel;
auto prediction = amc(inputSamples);
unsigned int id = startIndex;
for(const auto& p : prediction.elements())
{
TargetSampleType target;
if(m_NormalizeClassLabels)
{
target[0] = m_ClassDictionary[static_cast<TOutputValue>(p)];
}
else
{
target[0] = static_cast<TOutputValue>(p);
}
target[0] = static_cast<TOutputValue>(p);
targets->SetMeasurementVector(id,target);
++id;
}
@@ -213,18 +199,7 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
itkExceptionMacro(<< "Error opening " << filename.c_str() );
}
// Add comment with the model name
ofs << "#" << m_RFModel.name();
if(m_NormalizeClassLabels) ofs << " with_dictionary";
ofs << std::endl;
if(m_NormalizeClassLabels)
{
ofs << m_ClassDictionary.size() << " ";
for(const auto& l : m_ClassDictionary)
{
ofs << l << " ";
}
ofs << std::endl;
}
ofs << "#" << m_RFModel.name() << std::endl;
shark::TextOutArchive oa(ofs);
m_RFModel.save(oa,0);
}
@@ -244,10 +219,6 @@ SharkRandomForestsMachineLearningModel<TInputValue,TOutputValue>
{
if( line.find( m_RFModel.name() ) == std::string::npos )
itkExceptionMacro( "The model file : " + filename + " cannot be read." );