Commit 8397fec0 authored by Guillaume Pasero

WRG: fix gazillion wrg from shark, fix assignments in if condition and other warnings

parent 2f9ddd67
Showing 84 additions and 40 deletions
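Most of the silenced warnings come from Shark headers that OTB does not control. The recurring pattern in the hunks below is to bracket those third-party includes with GCC/Clang diagnostic pragmas, so that -Wshadow, -Wunused-parameter and -Woverloaded-virtual are muted only while the foreign headers are parsed and stay active for OTB's own code. A minimal, self-contained sketch of that guard pattern (the wrapped header is just an example, not a claim about any particular file in this commit):

// warning_guard_example.h -- illustrative sketch only, not part of the commit.
// Suppress selected warnings while a noisy third-party header is parsed,
// then restore the previous diagnostic state for our own code.
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push                       // save the current warning state
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include <shark/Models/Autoencoder.h>             // third-party header that triggers the warnings
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop                        // warnings re-enabled from here on
#endif

Because the pop restores exactly the state saved by the push, the guard can be repeated around each third-party include without leaking into the rest of the translation unit.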
@@ -155,7 +155,7 @@ class VectorDimensionalityReduction : public Application
 /*
 key.erase( std::remove_if(key.begin(),key.end(),IsNotAlphaNum), key.end());
 std::transform(key.begin(), key.end(), key.begin(), tolower);*/
-OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
+//OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
 /* if(fieldType == OFTInteger || ogr::version_proxy::IsOFTInteger64(fieldType) || fieldType == OFTReal)
 {*/
 //std::string tmpKey="feat."+key;
@@ -320,7 +320,7 @@ class VectorDimensionalityReduction : public Application
 // Add the field of prediction in the output layer if field not exist
-for (int i=0; i<GetParameterStringList("featout").size() ;i++)
+for (unsigned int i=0; i<GetParameterStringList("featout").size() ;i++)
 {
 OGRFeatureDefn &layerDefn = outLayer.GetLayerDefn();
 int idx = layerDefn.GetFieldIndex(GetParameterStringList("featout")[i].c_str());
...
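The loop-counter change above (and the similar ones in the following hunks) addresses the signed/unsigned comparison warning: GetParameterStringList() returns a std::vector<std::string>, whose size() is an unsigned std::size_t, so comparing it against a signed int counter draws -Wsign-compare. A minimal sketch of the issue and the fix, with a hypothetical function name:

#include <string>
#include <vector>

// Illustrative sketch only (hypothetical function): comparing a signed int
// against std::vector::size(), which returns an unsigned std::size_t,
// triggers -Wsign-compare on GCC/Clang.
void useFeatures(const std::vector<std::string>& featout)
{
  // for (int i = 0; i < featout.size(); i++)        // warns: signed/unsigned comparison
  for (unsigned int i = 0; i < featout.size(); i++)  // the fix applied throughout this commit
    {
    // ... use featout[i] ...
    }
}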
@@ -148,7 +148,7 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
 regularization.SetSize(s_nbneuron.size());
 rho.SetSize(s_nbneuron.size());
 beta.SetSize(s_nbneuron.size());
-for (int i=0; i<s_nbneuron.size(); i++){
+for (unsigned int i=0; i<s_nbneuron.size(); i++){
 nb_neuron[i]=std::stoi(s_nbneuron[i]);
 noise[i]=std::stof(s_noise[i]);
 regularization[i]=std::stof(s_regularization[i]);
...
@@ -125,14 +125,14 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
 dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
 typename TemplateEstimatorType::SizeType size;
 std::vector<std::basic_string<char>> s= GetParameterStringList("algorithm.som.s");
-for (int i=0; i<dim; i++){
+for (unsigned int i=0; i<dim; i++){
 size[i]=std::stoi(s[i]);
 }
 dimredTrainer->SetMapSize(size);
 typename TemplateEstimatorType::SizeType radius;
 std::vector<std::basic_string<char>> n= GetParameterStringList("algorithm.som.n");
-for (int i=0; i<dim; i++){
+for (unsigned int i=0; i<dim; i++){
 radius[i]=std::stoi(n[i]);
 }
 dimredTrainer->SetNeighborhoodSizeInit(radius);
...
@@ -4,10 +4,21 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
 #include <fstream>
-#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
+#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
 #include <shark/Models/FFNet.h>
 #include <shark/Models/Autoencoder.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 namespace otb
 {
 template <class TInputValue, class NeuronType>
...
 #ifndef AutoencoderModel_txx
 #define AutoencoderModel_txx
+#include "otbAutoencoderModel.h"
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
-#include "otbSharkUtils.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
 #include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
@@ -20,6 +28,9 @@
 #include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
 #include <shark/Algorithms/GradientDescent/SteepestDescent.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 namespace otb
 {
@@ -46,7 +57,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
 shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
 std::ofstream ofs;
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 ofs.open(m_LearningCurveFileName);
 ofs << "learning curve" << std::endl;
@@ -176,7 +187,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 optimizer.init(error);
 std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 File << "end layer" << std::endl;
 }
@@ -185,7 +196,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
 do{
 i++;
 optimizer.step(error);
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 File << optimizer.solution().value << std::endl;
 }
@@ -252,12 +263,12 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 i++;
 optimizer.step(error);
 std::cout<<"error after " << i << "iterations : " << optimizer.solution().value <<std::endl;
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 File << optimizer.solution().value << std::endl;
 }
 } while( !criterion.stop( optimizer.solution() ) );
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 File << "end layer" << std::endl;
 }
@@ -272,7 +283,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
 template <class TInputValue, class NeuronType>
 template <class T>
-void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
+void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double /*rho*/,double /*beta*/, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
 {
 shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
@@ -292,7 +303,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStopp
 i++;
 optimizer.step(error);
 std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
-if (this->m_WriteLearningCurve =true)
+if (this->m_WriteLearningCurve == true)
 {
 File << optimizer.solution().value << std::endl;
 }
@@ -319,13 +330,13 @@ bool AutoencoderModel<TInputValue,NeuronType>::CanReadFile(const std::string & f
 template <class TInputValue, class NeuronType>
-bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & filename)
+bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & /*filename*/)
 {
 return true;
 }
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & /*name*/)
 {
 std::cout << "saving model ..." << std::endl;
 std::ofstream ofs(filename);
@@ -382,7 +393,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename
 }
 template <class TInputValue, class NeuronType>
-void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & name)
+void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & /*name*/)
 {
 NetworkType net;
@@ -421,7 +432,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename
 template <class TInputValue, class NeuronType>
 typename AutoencoderModel<TInputValue,NeuronType>::TargetSampleType
-AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
 shark::RealVector samples(value.Size());
@@ -453,7 +464,7 @@ AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & valu
 template <class TInputValue, class NeuronType>
 void AutoencoderModel<TInputValue,NeuronType>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
 std::vector<shark::RealVector> features;
...
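The repeated change from "m_WriteLearningCurve =true" to "m_WriteLearningCurve == true" in the hunks above fixes a real bug, not just a warning: the single "=" assigns true to the member, the condition is then always satisfied, and the learning curve was written regardless of the user's setting (GCC points this out via -Wparentheses). A stripped-down illustration with a hypothetical class:

#include <iostream>

// Hypothetical class, for illustration only: '=' inside a condition assigns and
// yields the assigned value, so the branch is always taken and the flag is
// silently overwritten; '==' is the intended comparison.
struct Model
{
  bool m_WriteLearningCurve = false;

  void Train()
  {
    // if (m_WriteLearningCurve = true)   // bug: assignment, branch always taken
    if (m_WriteLearningCurve == true)     // fix applied throughout this commit
      {
      std::cout << "writing learning curve" << std::endl;
      }
  }
};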
@@ -2,8 +2,8 @@
 #define AutoencoderModelFactory_h
-#include <shark/Models/TiedAutoencoder.h>
-#include <shark/Models/Autoencoder.h>
+//#include <shark/Models/TiedAutoencoder.h>
+//#include <shark/Models/Autoencoder.h>
 #include "itkObjectFactoryBase.h"
 #include "itkImageIOBase.h"
...
@@ -20,9 +20,9 @@
 #include "otbAutoencoderModelFactory.h"
-#include "otbAutoencoderModel.h"
 #include "itkCreateObjectFunction.h"
+#include "otbAutoencoderModel.h"
 #include "itkVersion.h"
 namespace otb
...
@@ -111,9 +111,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 // Define iterators
 typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
+//typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
 typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
-typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+//typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 InputIteratorType inIt(inputPtr, outputRegionForThread);
 OutputIteratorType outIt(outputPtr, outputRegionForThread);
@@ -155,9 +155,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
 // Define iterators
 typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
-typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
+//typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
 typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
-typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
+//typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
 InputIteratorType inIt(inputPtr, outputRegionForThread);
 OutputIteratorType outIt(outputPtr, outputRegionForThread);
...
@@ -4,7 +4,17 @@
 #include "otbMachineLearningModelTraits.h"
 #include "otbMachineLearningModel.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
+#include "otb_shark.h"
 #include <shark/Algorithms/Trainers/PCA.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 namespace otb
 {
...
@@ -2,9 +2,16 @@
 #ifndef PCAModel_txx
 #define PCAModel_txx
+#include "otbPCAModel.h"
 #include <fstream>
-#include <shark/Data/Dataset.h>
 #include "itkMacro.h"
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Woverloaded-virtual"
+#endif
 #include "otbSharkUtils.h"
 //include train function
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
@@ -13,6 +20,9 @@
 #include <shark/ObjectiveFunctions/Regularizer.h> //L2 regulariziation
 #include <shark/ObjectiveFunctions/ErrorFunction.h>
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
 namespace otb
 {
@@ -65,13 +75,13 @@ bool PCAModel<TInputValue>::CanReadFile(const std::string & filename)
 template <class TInputValue>
-bool PCAModel<TInputValue>::CanWriteFile(const std::string & filename)
+bool PCAModel<TInputValue>::CanWriteFile(const std::string & /*filename*/)
 {
 return true;
 }
 template <class TInputValue>
-void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & /*name*/)
 {
 std::ofstream ofs(filename);
 //ofs << m_encoder.name() << std::endl; //first line
@@ -98,7 +108,7 @@ otxt.close();
 }
 template <class TInputValue>
-void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & name)
+void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*name*/)
 {
 std::ifstream ifs(filename);
 char encoder[256];
@@ -130,7 +140,7 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
 template <class TInputValue>
 typename PCAModel<TInputValue>::TargetSampleType
-PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
 shark::RealVector samples(value.Size());
 for(size_t i = 0; i < value.Size();i++)
@@ -156,7 +166,7 @@ PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueT
 template <class TInputValue>
 void PCAModel<TInputValue>
-::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
+::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
 {
 std::vector<shark::RealVector> features;
...
 #ifndef SOMModel_txx
 #define SOMModel_txx
+#include "otbSOMModel.h"
 #include "otbImageFileReader.h"
 #include "otbImageFileWriter.h"
@@ -68,7 +70,7 @@ bool SOMModel<TInputValue, MapDimension>::CanReadFile(const std::string & filena
 template <class TInputValue, unsigned int MapDimension>
-bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & filename)
+bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & /*filename*/)
 {
 return true;
 }
@@ -91,7 +93,7 @@ std::istream & binary_read(std::istream& stream, T& value){
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & /*name*/)
 {
 itk::ImageRegionConstIterator<MapType> inputIterator(m_SOMMap,m_SOMMap->GetLargestPossibleRegion());
 inputIterator.GoToBegin();
@@ -133,7 +135,7 @@ void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, con
 }
 template <class TInputValue, unsigned int MapDimension>
-void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & name)
+void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & /*name*/)
 {
 std::ifstream ifs(filename, std::ios::binary);
@@ -154,7 +156,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 SizeType size;
 itk::Index< MapDimension > index;
-for (int i=0 ; i<MapDimension; i++)
+for (unsigned int i=0 ; i<MapDimension; i++)
 {
 binary_read(ifs,size[i]);
 index[i]=0;
@@ -174,7 +176,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 std::string value;
 while(!outputIterator.IsAtEnd()){
 InputSampleType vect(numberOfElements);
-for (int i=0 ; i<numberOfElements; i++)
+for (unsigned int i=0 ; i<numberOfElements; i++)
 {
 float v; // InputValue type is not the same during training anddimredvector.
 binary_read(ifs,v);
@@ -191,13 +193,13 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
 template <class TInputValue, unsigned int MapDimension>
 typename SOMModel<TInputValue, MapDimension>::TargetSampleType
-SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
+SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
 {
 TargetSampleType target;
 target.SetSize(this->m_Dimension);
 auto winner =m_SOMMap->GetWinner(value);
-for (int i=0; i< this->m_Dimension ;i++) {
+for (unsigned int i=0; i< this->m_Dimension ;i++) {
 target[i] = winner.GetElement(i);
 }
...
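The remaining edits silence -Wunused-parameter by commenting out the names of parameters that must stay in the signature to match the virtual interface but are not used in a particular override. A short sketch of the idiom, with hypothetical class and method names (only the technique matches the diff above):

#include <string>

// Hypothetical base/derived pair, for illustration only.
class ModelBase
{
public:
  virtual bool CanWriteFile(const std::string& filename) = 0;
  virtual ~ModelBase() = default;
};

class AlwaysWritableModel : public ModelBase
{
public:
  // The parameter must remain in the signature to override the base method,
  // but commenting out its name marks it as intentionally unused, so the
  // compiler no longer emits -Wunused-parameter for this definition.
  bool CanWriteFile(const std::string& /*filename*/) override
  {
    return true;
  }
};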