Commit 8397fec0 authored by Guillaume Pasero

WRG: fix gazillion wrg from shark, fix assignments in if condition and other warnings

parent 2f9ddd67
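Most of the warnings below are fixed with three recurring patterns: loop counters compared against size() become unsigned, accidental assignments inside if conditions become comparisons, and the names of deliberately unused parameters are commented out. A minimal standalone sketch of the first two patterns, using illustrative names only (not taken from the OTB sources):

#include <iostream>
#include <vector>

int main()
{
  std::vector<int> values = {1, 2, 3};

  // -Wsign-compare: size() returns an unsigned type, so the loop
  // counter is declared unsigned as well.
  for (unsigned int i = 0; i < values.size(); i++)
  {
    std::cout << values[i] << std::endl;
  }

  bool writeLearningCurve = false;
  // "if (writeLearningCurve = true)" assigns true and the branch is
  // always taken; the intended test is the comparison below.
  if (writeLearningCurve == true)
  {
    std::cout << "writing learning curve" << std::endl;
  }
  return 0;
}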
@@ -155,7 +155,7 @@ class VectorDimensionalityReduction : public Application
/*
key.erase( std::remove_if(key.begin(),key.end(),IsNotAlphaNum), key.end());
std::transform(key.begin(), key.end(), key.begin(), tolower);*/
OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
//OGRFieldType fieldType = layerDefn.GetFieldDefn(iField)->GetType();
/* if(fieldType == OFTInteger || ogr::version_proxy::IsOFTInteger64(fieldType) || fieldType == OFTReal)
{*/
//std::string tmpKey="feat."+key;
@@ -320,7 +320,7 @@ class VectorDimensionalityReduction : public Application
// Add the field of prediction in the output layer if the field does not exist
for (int i=0; i<GetParameterStringList("featout").size() ;i++)
for (unsigned int i=0; i<GetParameterStringList("featout").size() ;i++)
{
OGRFeatureDefn &layerDefn = outLayer.GetLayerDefn();
int idx = layerDefn.GetFieldIndex(GetParameterStringList("featout")[i].c_str());
......
@@ -148,7 +148,7 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>::Trai
regularization.SetSize(s_nbneuron.size());
rho.SetSize(s_nbneuron.size());
beta.SetSize(s_nbneuron.size());
for (int i=0; i<s_nbneuron.size(); i++){
for (unsigned int i=0; i<s_nbneuron.size(); i++){
nb_neuron[i]=std::stoi(s_nbneuron[i]);
noise[i]=std::stof(s_noise[i]);
regularization[i]=std::stof(s_regularization[i]);
......
@@ -125,14 +125,14 @@ void TrainDimensionalityReductionApplicationBase<TInputValue,TOutputValue>
dimredTrainer->SetMaxWeight(GetParameterFloat("algorithm.som.iv"));
typename TemplateEstimatorType::SizeType size;
std::vector<std::basic_string<char>> s= GetParameterStringList("algorithm.som.s");
for (int i=0; i<dim; i++){
for (unsigned int i=0; i<dim; i++){
size[i]=std::stoi(s[i]);
}
dimredTrainer->SetMapSize(size);
typename TemplateEstimatorType::SizeType radius;
std::vector<std::basic_string<char>> n= GetParameterStringList("algorithm.som.n");
for (int i=0; i<dim; i++){
for (unsigned int i=0; i<dim; i++){
radius[i]=std::stoi(n[i]);
}
dimredTrainer->SetNeighborhoodSizeInit(radius);
......
@@ -4,10 +4,21 @@
#include "otbMachineLearningModelTraits.h"
#include "otbMachineLearningModel.h"
#include <fstream>
#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include "otb_shark.h"
#include <shark/Algorithms/StoppingCriteria/AbstractStoppingCriterion.h>
#include <shark/Models/FFNet.h>
#include <shark/Models/Autoencoder.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
namespace otb
{
template <class TInputValue, class NeuronType>
......
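The header hunks above wrap the Shark includes in GCC/Clang diagnostic guards so that warnings emitted by the third-party headers do not flood the build, while the project's own warning level stays untouched. A sketch of the same guard pattern around a hypothetical noisy header (the included file name is illustrative):

#if defined(__GNUC__) || defined(__clang__)
// push saves the current diagnostic state before relaxing it
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include "thirdparty/noisy_header.h" // hypothetical third-party include
#if defined(__GNUC__) || defined(__clang__)
// pop restores the previous state, so project code is still fully checked
#pragma GCC diagnostic pop
#endif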
#ifndef AutoencoderModel_txx
#define AutoencoderModel_txx
#include "otbAutoencoderModel.h"
#include <fstream>
#include <shark/Data/Dataset.h>
#include "itkMacro.h"
#include "otbSharkUtils.h"
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include "otbSharkUtils.h"
//include train function
#include <shark/ObjectiveFunctions/ErrorFunction.h>
#include <shark/ObjectiveFunctions/SparseAutoencoderError.h>//the error function performing the regularisation of the hidden neurons
@@ -20,6 +28,9 @@
#include <shark/Algorithms/StoppingCriteria/TrainingProgress.h> //Stops when the algorithm seems to converge, Tracks the progress of the training error over a period of time
#include <shark/Algorithms/GradientDescent/SteepestDescent.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
namespace otb
{
@@ -46,7 +57,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Train()
shark::Data<shark::RealVector> inputSamples_copy = inputSamples;
std::ofstream ofs;
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
ofs.open(m_LearningCurveFileName);
ofs << "learning curve" << std::endl;
@@ -176,7 +187,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
optimizer.init(error);
std::cout<<"error before training : " << optimizer.solution().value<<std::endl;
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
File << "end layer" << std::endl;
}
@@ -185,7 +196,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneLayer(shark::AbstractStop
do{
i++;
optimizer.step(error);
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
File << optimizer.solution().value << std::endl;
}
@@ -252,12 +263,12 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
i++;
optimizer.step(error);
std::cout<<"error after " << i << "iterations : " << optimizer.solution().value <<std::endl;
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
File << optimizer.solution().value << std::endl;
}
} while( !criterion.stop( optimizer.solution() ) );
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
File << "end layer" << std::endl;
}
@@ -272,7 +283,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainOneSparseLayer(shark::Abstra
template <class TInputValue, class NeuronType>
template <class T>
void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double rho,double beta, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStoppingCriterion<T> & criterion,double /*rho*/,double /*beta*/, double regularization, shark::Data<shark::RealVector> &samples, std::ostream& File)
{
shark::LabeledData<shark::RealVector,shark::RealVector> trainSet(samples,samples);//labels identical to inputs
@@ -292,7 +303,7 @@ void AutoencoderModel<TInputValue,NeuronType>::TrainNetwork(shark::AbstractStopp
i++;
optimizer.step(error);
std::cout<<"error after " << i << "iterations : " << optimizer.solution().value<<std::endl;
if (this->m_WriteLearningCurve =true)
if (this->m_WriteLearningCurve == true)
{
File << optimizer.solution().value << std::endl;
}
@@ -319,13 +330,13 @@ bool AutoencoderModel<TInputValue,NeuronType>::CanReadFile(const std::string & f
template <class TInputValue, class NeuronType>
bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & filename)
bool AutoencoderModel<TInputValue,NeuronType>::CanWriteFile(const std::string & /*filename*/)
{
return true;
}
template <class TInputValue, class NeuronType>
void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & name)
void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename, const std::string & /*name*/)
{
std::cout << "saving model ..." << std::endl;
std::ofstream ofs(filename);
@@ -382,7 +393,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Save(const std::string & filename
}
template <class TInputValue, class NeuronType>
void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & name)
void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename, const std::string & /*name*/)
{
NetworkType net;
@@ -421,7 +432,7 @@ void AutoencoderModel<TInputValue,NeuronType>::Load(const std::string & filename
template <class TInputValue, class NeuronType>
typename AutoencoderModel<TInputValue,NeuronType>::TargetSampleType
AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
{
shark::RealVector samples(value.Size());
@@ -453,7 +464,7 @@ AutoencoderModel<TInputValue,NeuronType>::DoPredict(const InputSampleType & valu
template <class TInputValue, class NeuronType>
void AutoencoderModel<TInputValue,NeuronType>
::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
{
std::vector<shark::RealVector> features;
......
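Several of the overrides above keep a parameter only to match the base-class signature; commenting out the unused name (for example ConfidenceValueType * /*quality*/) silences -Wunused-parameter without changing the interface. A short sketch of the idiom, with hypothetical class and method names:

#include <string>

class ModelBase
{
public:
  virtual ~ModelBase() {}
  virtual bool CanWriteFile(const std::string & filename) = 0;
};

class AlwaysWritableModel : public ModelBase
{
public:
  // The parameter name is commented out: the override never reads it,
  // so the compiler has nothing to flag as unused.
  bool CanWriteFile(const std::string & /*filename*/) override
  {
    return true;
  }
};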
@@ -2,8 +2,8 @@
#define AutoencoderModelFactory_h
#include <shark/Models/TiedAutoencoder.h>
#include <shark/Models/Autoencoder.h>
//#include <shark/Models/TiedAutoencoder.h>
//#include <shark/Models/Autoencoder.h>
#include "itkObjectFactoryBase.h"
#include "itkImageIOBase.h"
......
@@ -20,9 +20,9 @@
#include "otbAutoencoderModelFactory.h"
#include "otbAutoencoderModel.h"
#include "itkCreateObjectFunction.h"
#include "otbAutoencoderModel.h"
#include "itkVersion.h"
namespace otb
......
@@ -111,9 +111,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
// Define iterators
typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
//typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
//typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
InputIteratorType inIt(inputPtr, outputRegionForThread);
OutputIteratorType outIt(outputPtr, outputRegionForThread);
@@ -155,9 +155,9 @@ ImageDimensionalityReductionFilter<TInputImage, TOutputImage, TMaskImage>
// Define iterators
typedef itk::ImageRegionConstIterator<InputImageType> InputIteratorType;
typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
//typedef itk::ImageRegionConstIterator<MaskImageType> MaskIteratorType;
typedef itk::ImageRegionIterator<OutputImageType> OutputIteratorType;
typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
//typedef itk::ImageRegionIterator<ConfidenceImageType> ConfidenceMapIteratorType;
InputIteratorType inIt(inputPtr, outputRegionForThread);
OutputIteratorType outIt(outputPtr, outputRegionForThread);
......
@@ -4,7 +4,17 @@
#include "otbMachineLearningModelTraits.h"
#include "otbMachineLearningModel.h"
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include "otb_shark.h"
#include <shark/Algorithms/Trainers/PCA.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
namespace otb
{
......
@@ -2,9 +2,16 @@
#ifndef PCAModel_txx
#define PCAModel_txx
#include "otbPCAModel.h"
#include <fstream>
#include <shark/Data/Dataset.h>
#include "itkMacro.h"
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
#endif
#include "otbSharkUtils.h"
//include train function
#include <shark/ObjectiveFunctions/ErrorFunction.h>
@@ -13,6 +20,9 @@
#include <shark/ObjectiveFunctions/Regularizer.h> //L2 regularization
#include <shark/ObjectiveFunctions/ErrorFunction.h>
#if defined(__GNUC__) || defined(__clang__)
#pragma GCC diagnostic pop
#endif
namespace otb
{
@@ -65,13 +75,13 @@ bool PCAModel<TInputValue>::CanReadFile(const std::string & filename)
template <class TInputValue>
bool PCAModel<TInputValue>::CanWriteFile(const std::string & filename)
bool PCAModel<TInputValue>::CanWriteFile(const std::string & /*filename*/)
{
return true;
}
template <class TInputValue>
void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & name)
void PCAModel<TInputValue>::Save(const std::string & filename, const std::string & /*name*/)
{
std::ofstream ofs(filename);
//ofs << m_encoder.name() << std::endl; //first line
@@ -98,7 +108,7 @@ otxt.close();
}
template <class TInputValue>
void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & name)
void PCAModel<TInputValue>::Load(const std::string & filename, const std::string & /*name*/)
{
std::ifstream ifs(filename);
char encoder[256];
@@ -130,7 +140,7 @@ void PCAModel<TInputValue>::Load(const std::string & filename, const std::string
template <class TInputValue>
typename PCAModel<TInputValue>::TargetSampleType
PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
{
shark::RealVector samples(value.Size());
for(size_t i = 0; i < value.Size();i++)
@@ -156,7 +166,7 @@ PCAModel<TInputValue>::DoPredict(const InputSampleType & value, ConfidenceValueT
template <class TInputValue>
void PCAModel<TInputValue>
::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * quality) const
::DoPredictBatch(const InputListSampleType *input, const unsigned int & startIndex, const unsigned int & size, TargetListSampleType * targets, ConfidenceListSampleType * /*quality*/) const
{
std::vector<shark::RealVector> features;
......
#ifndef SOMModel_txx
#define SOMModel_txx
#include "otbSOMModel.h"
#include "otbImageFileReader.h"
#include "otbImageFileWriter.h"
@@ -68,7 +70,7 @@ bool SOMModel<TInputValue, MapDimension>::CanReadFile(const std::string & filena
template <class TInputValue, unsigned int MapDimension>
bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & filename)
bool SOMModel<TInputValue, MapDimension>::CanWriteFile(const std::string & /*filename*/)
{
return true;
}
@@ -91,7 +93,7 @@ std::istream & binary_read(std::istream& stream, T& value){
template <class TInputValue, unsigned int MapDimension>
void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & name)
void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, const std::string & /*name*/)
{
itk::ImageRegionConstIterator<MapType> inputIterator(m_SOMMap,m_SOMMap->GetLargestPossibleRegion());
inputIterator.GoToBegin();
@@ -133,7 +135,7 @@ void SOMModel<TInputValue, MapDimension>::Save(const std::string & filename, con
}
template <class TInputValue, unsigned int MapDimension>
void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & name)
void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, const std::string & /*name*/)
{
std::ifstream ifs(filename, std::ios::binary);
@@ -154,7 +156,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
SizeType size;
itk::Index< MapDimension > index;
for (int i=0 ; i<MapDimension; i++)
for (unsigned int i=0 ; i<MapDimension; i++)
{
binary_read(ifs,size[i]);
index[i]=0;
@@ -174,7 +176,7 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
std::string value;
while(!outputIterator.IsAtEnd()){
InputSampleType vect(numberOfElements);
for (int i=0 ; i<numberOfElements; i++)
for (unsigned int i=0 ; i<numberOfElements; i++)
{
float v; // InputValue type is not the same during training and dimred vector.
binary_read(ifs,v);
@@ -191,13 +193,13 @@ void SOMModel<TInputValue, MapDimension>::Load(const std::string & filename, con
template <class TInputValue, unsigned int MapDimension>
typename SOMModel<TInputValue, MapDimension>::TargetSampleType
SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * quality) const
SOMModel<TInputValue, MapDimension>::DoPredict(const InputSampleType & value, ConfidenceValueType * /*quality*/) const
{
TargetSampleType target;
target.SetSize(this->m_Dimension);
auto winner =m_SOMMap->GetWinner(value);
for (int i=0; i< this->m_Dimension ;i++) {
for (unsigned int i=0; i< this->m_Dimension ;i++) {
target[i] = winner.GetElement(i);
}
......