diff --git a/python_src/lib/.pylintrc b/python_src/lib/.pylintrc
new file mode 100644
index 0000000000000000000000000000000000000000..7615b8695ea3c2c8f107e43e9edb82815c51c565
--- /dev/null
+++ b/python_src/lib/.pylintrc
@@ -0,0 +1,2 @@
+[FORMAT]
+max-line-length=120
diff --git a/python_src/processings/__init__.py b/python_src/lib/__init__.py
similarity index 100%
rename from python_src/processings/__init__.py
rename to python_src/lib/__init__.py
diff --git a/python_src/lib/diapOTB.py b/python_src/lib/diapOTB.py
new file mode 100644
index 0000000000000000000000000000000000000000..171b48acd6d290734c72ff89dcd6b9af8af6017a
--- /dev/null
+++ b/python_src/lib/diapOTB.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os
+import argparse
+
+from processing.core.ConfigFile import ConfigFile, ConfigParamPattern
+from processing.core.DiapOTBEnums import ChainNames, ChainModes, ScriptNames, Satellite
+from processing.DInSAR import DInSarParamOthers
+from processing.PreProcessing import PreProcessingParamOthers
+from processing.PostProcessing import PostProcessingParamOthers
+from processing.Ground import GroundParamOthers
+
+from processing.DiapOTBProcessingFactory import DiapOTBProcessingFactory
+
+import processing.core.Utils as utils
+
+
+def prepare_output(output_dir):
+    """Prepare output directory to store the current processing
+    """
+    if not os.path.exists(output_dir):
+        print("The output directory does not exist and will be created")
+        os.makedirs(output_dir)
+    else:
+        print("The output directory exists. Some files can be overwritten")
+
+
+if __name__ == "__main__":
+
+    # Check arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("configfile", help="input configuration file for the application DiapOTB")
+    args = parser.parse_args()
+    print(args.configfile)
+
+    # Check and load configuration file
+    config_handler = ConfigFile(args.configfile, str(ScriptNames.SIMPLE_S1SM))
+    config_handler.load_configfile()
+
+    # Prepare output and logger
+    output_dir = config_handler.get_output_dir()
+    print(output_dir)
+
+    prepare_output(output_dir)
+
+    utils.init_logger()
+    utils.init_filelog(output_dir)
+
+
+    # Retrieve parameters from configuration file
+    param_dict = config_handler.create_param_dict_from_config_file(DInSarParamOthers,
+                                                                   str(ChainNames.DINSAR))
+    print(param_dict)
+
+    param_dict_pre = config_handler.create_param_dict_from_config_file(PreProcessingParamOthers,
+                                                                       str(ChainNames.PRE_PROCESSING))
+
+    # Append param dict with doppler_file
+    param_dict_pre[str(ConfigParamPattern.DOPFILE)] = config_handler.get_doppler_file()
+    print(param_dict_pre)
+
+    param_dict_post = config_handler.create_param_dict_from_config_file(PostProcessingParamOthers,
+                                                                        str(ChainNames.POST_PROCESSING))
+    print(param_dict_post)
+
+    param_dict_ground = config_handler.create_param_dict_from_config_file(GroundParamOthers,
+                                                                          str(ChainNames.GROUND))
+    print(param_dict_ground)
+
+
+    # Get and check main inputs : reference/secondary + dem and eof_path (if present)
+    reference_path = config_handler.get_reference_image()
+    secondary_path = config_handler.get_secondary_image()
+    reference_dir = os.path.dirname(reference_path)
+    secondary_dir = os.path.dirname(secondary_path)
+    reference_name = os.path.basename(reference_path)
+    secondary_name = os.path.basename(secondary_path)
+
+    dem = config_handler.get_dem()
+    eof_path = config_handler.get_eof()
+
+    utils.check_image_format(reference_path)
+    utils.check_image_format(secondary_path)
+
+    # Check sensor
+    dict_kwl_reference = utils.get_image_kwl(reference_path)
+    dict_kwl_secondary = utils.get_image_kwl(secondary_path)
+
+    sensor = utils.get_sensor_from_kwl(dict_kwl_reference)
+
+
+    # Adapt geom files with eof files (if sensor is S1 and eof_path not empty)
+    reference_name = utils.apply_eof_path_on_orbit(sensor, eof_path, reference_path,
+                                                   dict_kwl_reference, output_dir)
+    secondary_name = utils.apply_eof_path_on_orbit(sensor, eof_path, secondary_path,
+                                                   dict_kwl_secondary, output_dir)
+
+    # Adapt below processing following dem resolution and sensor
+    # Get information about DEM (spacing, size ..)
+    dict_dem_info = utils.get_dem_information(dem)
+    if dict_dem_info['estimatedGroundSpacingXDEM'] > 40. or dict_dem_info['estimatedGroundSpacingYDEM'] > 40.:
+        param_dict[str(DInSarParamOthers.ADVANTAGE)] = "correlation" # Correlation if resolution > 40 m
+        utils.print_on_std("Resolution of the input DEM is superior to 40 meters : " \
+                           "A correlation will be used to correct all deformation grids")
+
+    if sensor == str(Satellite.CSK) and str(DInSarParamOthers.ADVANTAGE) not in param_dict:
+         # Correlation if CSK
+        param_dict[str(DInSarParamOthers.ADVANTAGE)] = "correlation"
+
+
+    ### Processing ###
+        # TODO : Gather processing part with diapOTB (S1 IW)
+    utils.print_on_std("\n Beginning of DiapOTB processing (S1 SM or Cosmo mode) \n")
+
+    # Create our factory to build all processing following the current mode
+    chain_factory = DiapOTBProcessingFactory(mode=ChainModes.OTHERS)
+
+
+    utils.print_on_std("\n Pre_Processing on reference image \n")
+
+    pre_procesing_chain_reference = chain_factory.create_processing(str(ChainNames.PRE_PROCESSING),
+                                                                    image=reference_name,
+                                                                    image_dir=reference_dir,
+                                                                    param=param_dict_pre,
+                                                                    output_dir=output_dir)
+
+    pre_procesing_chain_reference.execute()
+
+    utils.print_on_std("\n Pre_Processing on secondary image \n")
+
+    pre_procesing_chain_secondary = chain_factory.create_processing(str(ChainNames.PRE_PROCESSING),
+                                                                    image=secondary_name,
+                                                                    image_dir=secondary_dir,
+                                                                    param=param_dict_pre,
+                                                                    output_dir=output_dir)
+
+    pre_procesing_chain_secondary.execute()
+
+    utils.print_on_std("\n Ground projection on reference image \n")
+
+    ground_chain_reference = chain_factory.create_processing(str(ChainNames.GROUND),
+                                                             image=reference_name,
+                                                             image_dir=reference_dir,
+                                                             param=param_dict_ground,
+                                                             output_dir=output_dir)
+
+    ground_chain_reference.append_inputs(pre_procesing_chain_reference.get_outputs())
+
+    ground_chain_reference.execute(dem=dem)
+
+    utils.print_on_std("\n Ground projection on secondary image \n")
+
+    # Change cartesian estimation to False
+    param_dict_ground[str(GroundParamOthers.CARTESIAN_ESTIMATION)] = False
+    ground_chain_secondary = chain_factory.create_processing(str(ChainNames.GROUND),
+                                                             image=secondary_name,
+                                                             image_dir=secondary_dir,
+                                                             param=param_dict_ground,
+                                                             output_dir=output_dir)
+
+    ground_chain_secondary.append_inputs(pre_procesing_chain_secondary.get_outputs())
+
+    ground_chain_secondary.execute(dem=dem)
+
+    utils.print_on_std("\n DINSAR Processing \n")
+
+    dinsar_chain = chain_factory.create_processing(str(ChainNames.DINSAR),
+                                                   secondary_image=secondary_name,
+                                                   secondary_dir=secondary_dir,
+                                                   reference_image=reference_name,
+                                                   reference_dir=reference_dir,
+                                                   param=param_dict,
+                                                   output_dir=output_dir)
+
+    dinsar_chain.append_inputs_reference(pre_procesing_chain_reference.get_outputs())
+    dinsar_chain.append_inputs_secondary(pre_procesing_chain_secondary.get_outputs())
+    dinsar_chain.append_inputs_reference(ground_chain_reference.get_outputs())
+    dinsar_chain.append_inputs_secondary(ground_chain_secondary.get_outputs())
+
+    dinsar_chain.execute(dem=dem)
+
+
+    utils.print_on_std("\n Post_Processing \n")
+
+    # TODO : Add Ortho processing (in PostProcessing or external chain ???)
+    post_processing_chain = chain_factory.create_processing(str(ChainNames.POST_PROCESSING),
+                                                            secondary_image=secondary_name,
+                                                            secondary_dir=secondary_dir,
+                                                            reference_image=reference_name,
+                                                            reference_dir=reference_dir,
+                                                            param=param_dict_post,
+                                                            output_dir=output_dir)
+
+    post_processing_chain.append_inputs_reference(ground_chain_reference.get_outputs())
+    post_processing_chain.append_inputs(dinsar_chain.get_outputs())
+
+    post_processing_chain.execute()
+
+    utils.print_on_std("\n End of DiapOTB processing (S1 SM or Cosmo mode) \n")
diff --git a/python_src/lib/diapOTB_S1IW.py b/python_src/lib/diapOTB_S1IW.py
new file mode 100644
index 0000000000000000000000000000000000000000..73bb2829f4b7217b9125695ade40e9933d29a81b
--- /dev/null
+++ b/python_src/lib/diapOTB_S1IW.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os
+import argparse
+
+from processing.core.ConfigFile import ConfigFile, ConfigParamPattern
+from processing.core.DiapOTBEnums import ChainNames, ChainModes, ScriptNames
+from processing.DInSAR import DInSarParamS1IW
+from processing.PreProcessing import PreProcessingParamS1IW
+from processing.PostProcessing import PostProcessingParamS1IW
+from processing.Ground import GroundParamS1IW
+
+from processing.DiapOTBProcessingFactory import DiapOTBProcessingFactory
+
+import processing.core.Utils as utils
+
+
+def prepare_output(output_dir):
+    """Prepare output directory to store the current processing
+    """
+    if not os.path.exists(output_dir):
+        print("The output directory does not exist and will be created")
+        os.makedirs(output_dir)
+    else :
+        print("The output directory exists. Some files can be overwritten")
+
+def prepare_burst_dir(output_dir, burst_list, esd_nbiter):
+    """Prepare burst directory to store the current processing
+    """
+    for burst_id in burst_list:
+        burst_dir = os.path.join(output_dir, "burst" + str(burst_id))
+
+        if not os.path.exists(burst_dir):
+            os.makedirs(burst_dir)
+
+            if esd_nbiter > 0:
+                os.makedirs(os.path.join(burst_dir, "esd"))
+
+
+if __name__ == "__main__":
+
+    # Check arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("configfile", help="input configuration file for the application DiapOTB")
+    args = parser.parse_args()
+    print(args.configfile)
+
+    # Check and load configuration file
+    config_handler = ConfigFile(args.configfile, str(ScriptNames.SIMPLE_S1IW))
+    config_handler.load_configfile()
+
+    # Prepare output and logger
+    output_dir =  config_handler.get_output_dir()
+    print(output_dir)
+
+    prepare_output(output_dir)
+
+    utils.init_logger()
+    utils.init_filelog(output_dir)
+
+
+    # Retrieve parameters from configuration file
+    param_dict = config_handler.create_param_dict_from_config_file(DInSarParamS1IW,
+                                                                   str(ChainNames.DINSAR))
+    print(param_dict)
+
+    param_dict_pre = config_handler.create_param_dict_from_config_file(PreProcessingParamS1IW,
+                                                                       str(ChainNames.PRE_PROCESSING))
+
+    # Append param dict with doppler_file
+    param_dict_pre[str(ConfigParamPattern.DOPFILE)] = config_handler.get_doppler_file()
+    print(param_dict_pre)
+
+    param_dict_post = config_handler.create_param_dict_from_config_file(PostProcessingParamS1IW,
+                                                                        str(ChainNames.POST_PROCESSING))
+    print(param_dict_post)
+
+    param_dict_ground = config_handler.create_param_dict_from_config_file(GroundParamS1IW,
+                                                                          str(ChainNames.GROUND))
+    print(param_dict_ground)
+
+
+    # Get and check main inputs : reference/secondary + dem and eof_path (if present)
+    reference_path = config_handler.get_reference_image()
+    secondary_path = config_handler.get_secondary_image()
+    reference_dir = os.path.dirname(reference_path)
+    secondary_dir = os.path.dirname(secondary_path)
+    reference_name = os.path.basename(reference_path)
+    secondary_name = os.path.basename(secondary_path)
+
+    dem = config_handler.get_dem()
+    eof_path = config_handler.get_eof()
+
+    utils.check_image_format(reference_path)
+    utils.check_image_format(secondary_path)
+
+    # Check sensor
+    dict_kwl_reference = utils.get_image_kwl(reference_path)
+    dict_kwl_secondary = utils.get_image_kwl(secondary_path)
+
+    sensor = utils.get_sensor_from_kwl(dict_kwl_reference)
+
+
+    # Adapt geom files with eof files (if sensor is S1 and eof_path not empty)
+    reference_name = utils.apply_eof_path_on_orbit(sensor, eof_path, reference_path,
+                                                   dict_kwl_reference, output_dir)
+    secondary_name = utils.apply_eof_path_on_orbit(sensor, eof_path, secondary_path,
+                                                   dict_kwl_secondary, output_dir)
+
+
+    # Adapt below processing following dem resolution and sensor
+    # Get information about DEM (spacing, size ..)
+    dict_dem_info = utils.get_dem_information(dem)
+    if dict_dem_info['estimatedGroundSpacingXDEM'] > 40. or dict_dem_info['estimatedGroundSpacingYDEM'] > 40. :
+        param_dict[str(DInSarParamS1IW.ADVANTAGE)] = "correlation" # Correlation if resolution > 40 m
+        utils.print_on_std("Resolution of the input DEM is superior to 40 meters : " \
+                           "A correlation will be used to correct all deformation grids")
+
+
+    # Get burst list from configuration file (for reference image)
+    # reference and secondary images may rarely have different burst match
+    # for instance,  burst 0 in reference image can match with burst 2 in secondary instead of id 0
+    # That's why a burst to process can be different than burst ids
+    burst_list = config_handler.get_burst_list()
+    valid_burst_reference, valid_burst_secondary = utils.select_burst(dict_kwl_reference, dict_kwl_secondary,
+                                                                      min(burst_list), max(burst_list))
+
+    # Add burst id and burst to process to all param
+    # "burst_ids" is always equal to reference burst
+    # "burst_to_process" is equal to reference burst except for pre-processing chain
+    # (burst extraction) for secondary
+    param_dict_pre["burst_ids"] = valid_burst_reference
+    param_dict_pre["burst_to_extract"] = valid_burst_reference
+    param_dict_pre_secondary = param_dict_pre.copy()
+    param_dict_pre_secondary["burst_ids"] = valid_burst_reference
+    param_dict_pre_secondary["burst_to_extract"] = valid_burst_secondary
+    param_dict_ground["burst_ids"] = valid_burst_reference
+    param_dict["burst_ids"] = valid_burst_reference
+
+
+    # Prepare output directory for each burst
+    prepare_burst_dir(output_dir, valid_burst_reference, param_dict[str(DInSarParamS1IW.ESD_ITER)])
+    print(valid_burst_reference)
+    print(valid_burst_secondary)
+
+
+    # TODO : Gather processing part with diapOTB (S1SM and CSK sensor)
+    ### Processing ###
+    utils.print_on_std("\n Beginning of DiapOTB processing (S1 IW mode) \n")
+
+    # Create our factory to build all processing following the current mode
+    chain_factory = DiapOTBProcessingFactory(mode=ChainModes.S1_IW)
+
+
+    utils.print_on_std("\n Pre_Processing on reference image \n")
+
+    pre_procesing_chain_reference = chain_factory.create_processing(str(ChainNames.PRE_PROCESSING),
+                                                                    image=reference_name,
+                                                                    image_dir=reference_dir,
+                                                                    param=param_dict_pre,
+                                                                    output_dir=output_dir)
+
+    pre_procesing_chain_reference.execute()
+
+    utils.print_on_std("\n Pre_Processing on secondary image \n")
+
+    pre_procesing_chain_secondary = chain_factory.create_processing(str(ChainNames.PRE_PROCESSING),
+                                                                    image=secondary_name,
+                                                                    image_dir=secondary_dir,
+                                                                    param=param_dict_pre_secondary,
+                                                                    output_dir=output_dir)
+
+    pre_procesing_chain_secondary.execute()
+
+    utils.print_on_std("\n Ground projection on reference image \n")
+
+    ground_chain_reference = chain_factory.create_processing(str(ChainNames.GROUND),
+                                                             image=reference_name,
+                                                             image_dir=reference_dir,
+                                                             param=param_dict_ground,
+                                                             output_dir=output_dir)
+
+    ground_chain_reference.append_inputs(pre_procesing_chain_reference.get_outputs())
+
+    ground_chain_reference.execute(dem=dem)
+
+    utils.print_on_std("\n Ground projection on secondary image \n")
+
+    # Change cartesian estimation to False
+    param_dict_ground[str(GroundParamS1IW.CARTESIAN_ESTIMATION)] = False
+    ground_chain_secondary = chain_factory.create_processing(str(ChainNames.GROUND),
+                                                             image=secondary_name,
+                                                             image_dir=secondary_dir,
+                                                             param=param_dict_ground,
+                                                             output_dir=output_dir)
+
+    ground_chain_secondary.append_inputs(pre_procesing_chain_secondary.get_outputs())
+
+    ground_chain_secondary.execute(dem=dem)
+
+    utils.print_on_std("\n DINSAR Processing \n")
+
+    dinsar_chain = chain_factory.create_processing(str(ChainNames.DINSAR),
+                                                   secondary_image=secondary_name,
+                                                   secondary_dir=secondary_dir,
+                                                   reference_image=reference_name,
+                                                   reference_dir=reference_dir,
+                                                   param=param_dict,
+                                                   output_dir=output_dir)
+
+    dinsar_chain.append_inputs_reference(pre_procesing_chain_reference.get_outputs())
+    dinsar_chain.append_inputs_secondary(pre_procesing_chain_secondary.get_outputs())
+    dinsar_chain.append_inputs_reference(ground_chain_reference.get_outputs())
+    dinsar_chain.append_inputs_secondary(ground_chain_secondary.get_outputs())
+
+    dinsar_chain.execute(dem=dem)
+
+
+    utils.print_on_std("\n Post_Processing \n")
+
+    # TODO : Add Ortho processing (in PostProcessing or external chain ???)
+    post_processing_chain = chain_factory.create_processing(str(ChainNames.POST_PROCESSING),
+                                                            secondary_image=secondary_name,
+                                                            secondary_dir=secondary_dir,
+                                                            reference_image=reference_name,
+                                                            reference_dir=reference_dir,
+                                                            param=param_dict_post,
+                                                            output_dir=output_dir)
+
+    post_processing_chain.append_inputs_reference(ground_chain_reference.get_outputs())
+    post_processing_chain.append_inputs(dinsar_chain.get_outputs())
+
+    post_processing_chain.execute()
+
+    utils.print_on_std("\n End of DiapOTB processing (S1 IW mode) \n")
diff --git a/python_src/lib/processing/DInSAR.py b/python_src/lib/processing/DInSAR.py
new file mode 100644
index 0000000000000000000000000000000000000000..72622e36a313b336c67a2de7824ec37357477d71
--- /dev/null
+++ b/python_src/lib/processing/DInSAR.py
@@ -0,0 +1,868 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+DInSar chain
+"""
+
+import os
+
+from .core.DiapOTBProcessing import DiapOTBProcessingDualImages, \
+    ParamHandler, FilenamesHandler, \
+    ExecutorDualImages
+from .core.DiapOTBEnums import ExtendedEnum, ChainModes, \
+    ExtPosition, FilenamesEnum, \
+    RequiredKeysForDualImagesProcessing, extend_enum, DefaultEnum
+from .core.DiapOTBExceptions import DiapOTBException
+
+from .PreProcessing import PreProcessingOutputKeys
+from .Ground import GroundOutputKeys
+
+from .core.ApplicationWrapper import OTBApplicationWrapper
+from .core.ConfigFile import ConfigParamPattern
+
+# pylint: disable=too-few-public-methods
+# Specific enums
+class DInSarParamOthers(ExtendedEnum):
+    """Define each required parameters for DINSAR chain
+    """
+    MLAZI = str(ConfigParamPattern.MLAZI)
+    MLRAN = str(ConfigParamPattern.MLRAN)
+    MLAZI_GRID = "grid_ml_azi"
+    MLRAN_GRID = "grid_ml_ran"
+    GRID_STEP_RAN = str(ConfigParamPattern.GRIDSTEPRAN)
+    GRID_STEP_AZI = str(ConfigParamPattern.GRIDSTEPAZI)
+    GRID_THRESHOLD = str(ConfigParamPattern.GRIDTHES)
+    GRID_GAP = str(ConfigParamPattern.GRIDGAP)
+    INTERF_GAIN = str(ConfigParamPattern.INTERFGAIN)
+    ADVANTAGE = "advantage"
+    ML_INTERFRAN = str(ConfigParamPattern.INTERFMLRAN)
+    ML_INTERFAZI = str(ConfigParamPattern.INTERFMLAZI)
+
+
+@extend_enum(DInSarParamOthers)
+class DInSarParamS1IW(ExtendedEnum):
+    """Define each required parameters for DINSAR chain
+    (Others keys + the following ones)
+    """
+    ESD_ITER = str(ConfigParamPattern.ESDITER)
+    ESD_AUTOMODE = "esd_AutoMode"
+    BURSTIDS = "burst_ids"
+    WITH_INTERF = "with_interferogram"
+    WITH_CONCATENATION = "with_concatenation"
+
+
+class DInSarParamDefaultValueOthers(DefaultEnum):
+    """Define some default values or redirect to other values
+    The following paramaters are optional (other are mandatory)
+    """
+    ML_INTERFRAN = (str(ConfigParamPattern.INTERFMLRAN), str(ConfigParamPattern.MLRAN))
+    ML_INTERFAZI = (str(ConfigParamPattern.INTERFMLAZI), str(ConfigParamPattern.MLAZI))
+    MLRAN_GRID = (str(DInSarParamOthers.MLRAN_GRID), str(ConfigParamPattern.MLRAN))
+    MLAZI_GRID = (str(DInSarParamOthers.MLAZI_GRID), str(ConfigParamPattern.MLAZI))
+    ADVANTAGE = (str(DInSarParamOthers.ADVANTAGE), "projection")
+    
+
+class DInSarParamDefaultValueS1IW(DefaultEnum):
+    """Define some default values or redirect to other values
+    The following parameters are optional (others are mandatory)
+    """
+    ML_INTERFRAN = (str(ConfigParamPattern.INTERFMLRAN), str(ConfigParamPattern.MLRAN))
+    ML_INTERFAZI = (str(ConfigParamPattern.INTERFMLAZI), str(ConfigParamPattern.MLAZI))
+    MLRAN_GRID = (str(DInSarParamOthers.MLRAN_GRID), str(ConfigParamPattern.MLRAN))
+    MLAZI_GRID = (str(DInSarParamOthers.MLAZI_GRID), str(ConfigParamPattern.MLAZI))
+    ADVANTAGE = (str(DInSarParamOthers.ADVANTAGE), "projection")
+    ESD_AUTOMODE = (str(DInSarParamS1IW.ESD_AUTOMODE), True)
+    WITH_INTERF = (str(DInSarParamS1IW.WITH_INTERF), True)
+    WITH_CONCATENATION = (str(DInSarParamS1IW.WITH_CONCATENATION), True)
+
+
+class DInSarFilenames(FilenamesEnum):
+    """Define key for intermediate/output filenames for DInSAR chain
+    3 str to specify: a key, the extension to add and the position of the extension in order to create the file
+    """
+    FILES_AFTER_FINEGRID = ("grid_files", "fineDeformationGrid",
+                            str(ExtPosition.WHOLE))
+    FILES_AFTER_COREGISTRATION = ("coRe_files", "_coregistrated",
+                                  str(ExtPosition.SUFIX))
+    FILES_AFTER_DERAMP = ("coRe_deramp_files",
+                          "_coregistrated_reramp",
+                          str(ExtPosition.SUFIX))
+    FILES_AFTER_INTERF = ("interf_files", "interferogram",
+                          str(ExtPosition.WHOLE))
+    FILES_AFTER_CONCATENATE = ("interf_concatenate", "interferogram_swath",
+                               str(ExtPosition.EXCLUSIF))
+
+
+class DInSarInputKeysOthers(ExtendedEnum):
+    """Define input keys for DINSAR chain
+    """
+    DEM = "dem"
+    ML_REFERENCE = str(PreProcessingOutputKeys.ML) + "_reference"
+    DEMPROJ_REFERENCE = str(GroundOutputKeys.DEMPROJ) + "_reference"
+    CARTESIAN_ESTIMATION_REFERENCE = str(GroundOutputKeys.CARTESIAN_ESTIMATION) + "_reference"
+    DOP0_SECONDARY = str(PreProcessingOutputKeys.DOP0) + "_secondary"
+    ML_SECONDARY = str(PreProcessingOutputKeys.ML) + "_secondary"
+    DEMPROJ_SECONDARY = str(GroundOutputKeys.DEMPROJ) + "_secondary"
+
+
+@extend_enum(DInSarInputKeysOthers)
+class DInSarInputKeysS1IW(ExtendedEnum):
+    """Define input keys for DINSAR chain (Others keys + the following ones)
+    """
+    BURSTS_REFERENCE = str(PreProcessingOutputKeys.BURSTS) + "_reference"
+    DERAMP_REFERENCE = str(PreProcessingOutputKeys.DERAMP) + "_reference"
+    BURSTS_SECONDARY = str(PreProcessingOutputKeys.BURSTS) + "_secondary"
+    DERAMP_SECONDARY = str(PreProcessingOutputKeys.DERAMP) + "_secondary"
+
+
+class DInSarOutputKeysOthers(ExtendedEnum):
+    """Define output keys for DINSAR chain
+    """
+    GRIDS = "grids_list"
+    INTERFERO = "interferogram"
+    COREGISTRATED_SECONDARY = "coregistrated_secondary"
+
+
+@extend_enum(DInSarOutputKeysOthers)
+class DInSarOutputKeysS1IW(ExtendedEnum):
+    """Define output keys for DInSAR chain (Others keys + the following ones)
+    """
+    COREGISTRATED_SECONDARY_RERAMP = "coregistrated_secondary_reramp"
+    REFERENCE_RERAMP = "reference_reramp"
+    INTERFEROS = "interferograms_list"
+
+
+# pylint: enable=too-few-public-methods
+
+
+# DInSAR class
+class DInSAR(DiapOTBProcessingDualImages):
+    """Use the module to launch DINSAR chain.
+
+    main function : execute
+    """
+    def __init__(self, **kwargs):
+        # Base constructor to init required elts such as image/dir, output_dir and parameters
+        super().__init__(**kwargs)
+
+        # Init the specific arguments for the DINSAR chain
+        self._name = "DINSAR"
+        self._applications = ["SARFineDeformationGrid",
+                              "SARCoRegistration", "SARDeramp",
+                              "SARRobustInterferogram"]
+
+        self._mode = str(ChainModes.OTHERS)
+        if "mode" in kwargs and str(kwargs["mode"]) in ChainModes.list():
+            self._mode = str(kwargs["mode"])
+
+        print(self._mode)
+
+        # Init Handlers according to the mode
+        param_enum = DInSarParamOthers
+        file_enum = DInSarFilenames
+        default_param_enum = DInSarParamDefaultValueOthers
+        self._inputs_list = DInSarInputKeysOthers.list()
+
+        if self._mode == str(ChainModes.S1_IW):
+            param_enum = DInSarParamS1IW
+            default_param_enum = DInSarParamDefaultValueS1IW
+            self._inputs_list = DInSarInputKeysS1IW.list()
+
+        print(self._inputs_list)
+
+        self.param_handler = ParamHandler(param_enum,
+                                          self._param, default_param_enum)
+
+        self.file_handler_reference = FilenamesHandler(file_enum,
+                                                       self._reference_base,
+                                                       self._mode)
+
+        self.file_handler_secondary = FilenamesHandler(file_enum,
+                                                       self._secondary_base,
+                                                       self._mode)
+
+        # Get from parameter dictionary each argument
+        self.param_handler.check_param()
+
+    def retrieve_output(self, key):
+        """Retrieve a given output of the DInSAR chain
+        """
+        if not isinstance(key, DInSarOutputKeysS1IW) and key not in DInSarOutputKeysS1IW.list():
+            raise DiapOTBException("The current key is not an available "
+                                   "output key for DInSAR chain")
+
+        return self._dict_outputs[str(key)]
+
+    # Process functions
+    def execute(self, **kwargs):
+        """ DInSAR chain
+        """
+
+        if str(self._mode) == str(ChainModes.OTHERS):
+            self._input_enum = DInSarInputKeysOthers
+        else:
+            self._input_enum = DInSarInputKeysS1IW
+
+        self._executor_builder.add_mode(ChainModes.OTHERS,
+                                        ExecutorDInSAROthers)
+
+        self._executor_builder.add_mode(ChainModes.S1_IW,
+                                        ExecutorDInSARS1IW)
+
+        super().execute(**kwargs)
+
+        print("Pouet !!!")
+
+
+# Executors, one per mode
+class ExecutorDInSAROthers(ExecutorDualImages):
+    """Execute processing for DINSAR chain mode OTHERS (S1SM and CSK sensors).
+
+    The whole image is processed at once (no burst handling): a fine
+    deformation grid is estimated, the secondary image is coregistered
+    on the reference one, then an interferogram is computed.
+    """
+    def _deformation_grid(self, output_dir):
+        """Execute SARFineDeformationGrid.
+
+        :param output_dir: directory where the fine deformation grid is written
+        :return: path of the produced fine deformation grid
+        """
+        # Get required inputs to build the fine deformation grid
+        # Inputs are lists => single image in this mode, retrieve index 0
+        ml_reference = self._inputs[str(DInSarInputKeysOthers.ML_REFERENCE)][0]
+        ml_secondary = self._inputs[str(DInSarInputKeysOthers.ML_SECONDARY)][0]
+        demproj_reference = self._inputs[str(DInSarInputKeysOthers.DEMPROJ_REFERENCE)][0]
+        demproj_secondary = self._inputs[str(DInSarInputKeysOthers.DEMPROJ_SECONDARY)][0]
+
+        # Get output path (filename built by the secondary file handler)
+        # pylint: disable=no-member
+        fine_grid_path = os.path.join(output_dir,
+                                      self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_FINEGRID.get_key()))
+        # pylint: enable=no-member
+
+        # Instantiate and execute SARFineDeformationGrid
+        app_grid = OTBApplicationWrapper("SARFineDeformationGrid")
+        app_grid.set_input_images(insarmaster=self.reference_path(),
+                                  insarslave=self.secondary_path(),
+                                  inmlmaster=ml_reference,
+                                  inmlslave=ml_secondary,
+                                  indemprojmaster=demproj_reference,
+                                  indemprojslave=demproj_secondary,
+                                  indem=self._inputs[str(DInSarInputKeysOthers.DEM)])
+        app_grid.set_parameters(mlran=self.param_handler.get_param(str(DInSarParamOthers.MLRAN_GRID)),
+                                mlazi=self.param_handler.get_param(str(DInSarParamOthers.MLAZI_GRID)),
+                                gridsteprange=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_AZI)),
+                                threshold=self.param_handler.get_param(str(DInSarParamOthers.GRID_THRESHOLD)),
+                                gap=self.param_handler.get_param(str(DInSarParamOthers.GRID_GAP)),
+                                advantage=self.param_handler.get_param(str(DInSarParamOthers.ADVANTAGE)))
+        app_grid.set_output_images(out=fine_grid_path)
+        app_grid.execute_app(in_memory=False)
+
+        # Return the output grid
+        return fine_grid_path
+
+    def _coregistration(self, output_dir, grid):
+        """CoRegistration of the secondary image on the reference one
+        (SARCoRegistration application).
+
+        :param output_dir: directory where the coregistered image is written
+        :param grid: path of the fine deformation grid
+        :return: path of the coregistered secondary image
+        """
+        # Get required inputs to launch coregistration
+        # Inputs are lists => single image in this mode, retrieve index 0
+        doppler0_secondary = self._inputs[str(DInSarInputKeysOthers.DOP0_SECONDARY)][0]
+
+        # Get output path
+        # pylint: disable=no-member
+        coregistred_image_path = os.path.join(output_dir,
+                                              self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_COREGISTRATION.get_key()))
+        # pylint: enable=no-member
+
+        # Define some hard-coded parameters
+        nb_ramps = 257
+        tiles_size = 50
+        margin = 7
+
+        # Instantiate and execute SARCoRegistration
+        app_core = OTBApplicationWrapper("SARCoRegistration")
+        app_core.set_input_images(insarmaster=self.reference_path(),
+                                  insarslave=self.secondary_path(),
+                                  ingrid=grid)
+        app_core.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_AZI)),
+                                doppler0=doppler0_secondary,
+                                sizetiles=tiles_size,
+                                margin=margin,
+                                nbramps=nb_ramps)
+        app_core.set_output_images(out=coregistred_image_path)
+        app_core.execute_app(in_memory=False)
+
+        # Return the output image
+        return coregistred_image_path
+
+    def _interferogram(self, output_dir, grid, coregisted):
+        """Build the interferogram (SARRobustInterferogram application).
+
+        :param output_dir: directory where the interferogram is written
+        :param grid: path of the fine deformation grid
+        :param coregisted: path of the coregistered secondary image
+        :return: path of the produced interferogram
+        """
+        # Get required inputs to build the interferogram
+        # Inputs are lists => single image in this mode, retrieve index 0
+        cartmean_reference = self._inputs[str(DInSarInputKeysOthers.CARTESIAN_ESTIMATION_REFERENCE)][0]
+
+        # Get output path
+        # pylint: disable=no-member
+        interferogram_path = os.path.join(output_dir,
+                                          self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key()))
+        # pylint: enable=no-member
+
+        # Define some hard-coded parameters (averaging margins in range/azimut)
+        marginran = 1
+        marginazi = 1
+
+        # TODO : have different ml factors than processing
+        mlran = self.param_handler.get_param(str(DInSarParamOthers.MLRAN))
+        mlazi = self.param_handler.get_param(str(DInSarParamOthers.MLAZI))
+
+        # Instantiate and execute SARRobustInterferogram
+        app_interf = OTBApplicationWrapper("SARRobustInterferogram")
+        app_interf.set_input_images(insarmaster=self.reference_path(),
+                                    insarslave=self.secondary_path(),
+                                    ingrid=grid,
+                                    incoregistratedslave=coregisted,
+                                    incartmeanmaster=cartmean_reference)
+        app_interf.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamOthers.GRID_STEP_AZI)),
+                                  mlran=mlran,
+                                  mlazi=mlazi,
+                                  marginran=marginran, marginazi=marginazi,
+                                  gain=self.param_handler.get_param(str(DInSarParamOthers.INTERF_GAIN)))
+        app_interf.set_output_images(out=interferogram_path)
+        app_interf.execute_app(in_memory=False)
+
+        # Return the output interferogram
+        return interferogram_path
+
+    def execute(self):
+        """DInSar chain for S1SM and CSK sensors.
+
+        Three applications are chained here: SARFineDeformationGrid,
+        SARCoRegistration and SARRobustInterferogram. All intermediate
+        products are written into the OUTPUT_DIR input directory and the
+        resulting paths are stored in self._outputs.
+        """
+
+        # retrieve input : output_dir
+        output_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)]
+
+        # Empty lists
+        grid_list = []
+
+        self.file_handler_secondary.create_intermediate_names()
+
+        # SARFineDeformationGrid
+        grid = self._deformation_grid(output_dir)
+        grid_list.append(grid)
+
+        # SARCoRegistration
+        coregistred = self._coregistration(output_dir, grid)
+
+        # SARRobustInterferogram
+        interferogram = self._interferogram(output_dir, grid, coregistred)
+
+        # Assign outputs
+        self._outputs[str(DInSarOutputKeysOthers.GRIDS)] = grid_list
+        self._outputs[str(DInSarOutputKeysOthers.INTERFERO)] = interferogram
+        self._outputs[str(DInSarOutputKeysOthers.COREGISTRATED_SECONDARY)] = coregistred
+
+
+class ExecutorDInSARS1IW(ExecutorDualImages):
+    """Execute processing for DINSAR chain mode S1SM-CSK
+    """
+
+    def _deformation_grid(self, burst_dir, id_loop):
+        """Execute SARFineDeformationGrid
+        """
+        # Get required inputs to build the fine deformation grid
+        # Inputs are list => retrieve only the current index (burst)
+        deramp_reference = self._inputs[str(DInSarInputKeysS1IW.DERAMP_REFERENCE)][id_loop]
+        deramp_secondary = self._inputs[str(DInSarInputKeysS1IW.DERAMP_SECONDARY)][id_loop]
+        ml_reference = self._inputs[str(DInSarInputKeysS1IW.ML_REFERENCE)][id_loop]
+        ml_secondary = self._inputs[str(DInSarInputKeysS1IW.ML_SECONDARY)][id_loop]
+        demproj_reference = self._inputs[str(DInSarInputKeysS1IW.DEMPROJ_REFERENCE)][id_loop]
+        demproj_secondary = self._inputs[str(DInSarInputKeysS1IW.DEMPROJ_SECONDARY)][id_loop]
+
+        # Get output path
+        # pylint: disable=no-member
+        fine_grid_path = os.path.join(burst_dir,
+                                      self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_FINEGRID.get_key()))
+        # pylint: enable=no-member
+
+        # Instanciate and execute SARFineDeformationGrid
+        app_grid = OTBApplicationWrapper("SARFineDeformationGrid")
+        app_grid.set_input_images(insarmaster=deramp_reference,
+                                  insarslave=deramp_secondary,
+                                  inmlmaster=ml_reference,
+                                  inmlslave=ml_secondary,
+                                  indemprojmaster=demproj_reference,
+                                  indemprojslave=demproj_secondary,
+                                  indem=self._inputs[str(DInSarInputKeysS1IW.DEM)])
+        app_grid.set_parameters(mlran=self.param_handler.get_param(str(DInSarParamS1IW.MLRAN_GRID)),
+                                mlazi=self.param_handler.get_param(str(DInSarParamS1IW.MLAZI_GRID)),
+                                gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                threshold=self.param_handler.get_param(str(DInSarParamS1IW.GRID_THRESHOLD)),
+                                gap=self.param_handler.get_param(str(DInSarParamS1IW.GRID_GAP)),
+                                advantage=self.param_handler.get_param(str(DInSarParamS1IW.ADVANTAGE)))
+        app_grid.set_output_images(out=fine_grid_path)
+        app_grid.execute_app(in_memory=False)
+
+        # Return the output grid
+        return fine_grid_path
+
+    def _coregistration(self, burst_dir, id_loop, grid):
+        """CoRegistration
+        """
+        # Get required inputs to launch coregistration
+        # Inputs are list => retrieve only the current index (burst)
+        deramp_reference = self._inputs[str(DInSarInputKeysS1IW.DERAMP_REFERENCE)][id_loop]
+        deramp_secondary = self._inputs[str(DInSarInputKeysS1IW.DERAMP_SECONDARY)][id_loop]
+        doppler0_secondary = self._inputs[str(DInSarInputKeysS1IW.DOP0_SECONDARY)][id_loop]
+
+        # Get output path
+        # pylint: disable=no-member
+        coregistred_image_path = os.path.join(burst_dir,
+                                              self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_COREGISTRATION.get_key()))
+        # pylint: enable=no-member
+
+        # Define some hard-coded parameters
+        nb_ramps = 256*2*10+1
+        tiles_size = 50
+        margin = 7
+
+        # Instanciate and execute SARCoRegistration
+        app_core = OTBApplicationWrapper("SARCoRegistration")
+        app_core.set_input_images(insarmaster=deramp_reference,
+                                  insarslave=deramp_secondary,
+                                  ingrid=grid)
+        app_core.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                doppler0=doppler0_secondary,
+                                sizetiles=tiles_size,
+                                margin=margin,
+                                nbramps=nb_ramps)
+        app_core.set_output_images(out=coregistred_image_path)
+        app_core.execute_app(in_memory=False)
+
+        # Return the output image
+        return coregistred_image_path
+
+    def _reramp_coregistred(self, burst_dir, id_loop, grid, coregistred_image):
+        """Apply a reramping on coregistred_image following a defromation grid
+        """
+        # Get required inputs to launch coregistration
+        # Inputs are list => retrieve only the current index (burst)
+        deramp_secondary = self._inputs[str(DInSarInputKeysS1IW.DERAMP_SECONDARY)][id_loop]
+
+        # Get output path
+        # pylint: disable=no-member
+        coregistred_deramp_path = os.path.join(burst_dir,
+                                               self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_DERAMP.get_key()))
+        # pylint: enable=no-member
+
+
+        # Apply deramp application to reramp the image with a deformation grid
+        reramp = "true"
+        shift = "true"
+
+        # Instanciate and execute SARDeramp
+        app_deramp = OTBApplicationWrapper("SARDeramp")
+        app_deramp.set_input_images(in_=coregistred_image,
+                                    ingrid=grid,
+                                    inslave=deramp_secondary)
+        app_deramp.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                  reramp=reramp, shift=shift)
+        app_deramp.set_output_images(out=coregistred_deramp_path)
+        app_deramp.execute_app(in_memory=False)
+
+        # Return the output
+        return coregistred_deramp_path
+
+    def _interferogram(self, burst_dir, id_loop, grid, reramp_coregisted):
+        """Build the interferogram
+        """
+        # Get required inputs to launch coregistration
+        # Inputs are list => retrieve only the current index (burst)
+        burst_reference = self._inputs[str(DInSarInputKeysS1IW.BURSTS_REFERENCE)][id_loop]
+        burst_secondary = self._inputs[str(DInSarInputKeysS1IW.BURSTS_SECONDARY)][id_loop]
+        cartmean_reference = self._inputs[str(DInSarInputKeysS1IW.CARTESIAN_ESTIMATION_REFERENCE)][id_loop]
+
+        # Get output path
+        output_dir = burst_dir
+        esd_nbiter = self.param_handler.get_param(str(DInSarParamS1IW.ESD_ITER))
+        if esd_nbiter > 0:
+            output_dir = os.path.join(burst_dir, "esd")
+
+        # pylint: disable=no-member
+        interferogram_path = os.path.join(output_dir, self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key()))
+        # pylint: enable=no-member
+
+        # Define some hard-coded parameters
+        # ml factors have to be 1x1 here to call esd, then.
+        # margin ran and azi (use to apply an average to the final value) set to 1
+        mlran = 1
+        mlazi = 1
+
+        # Instanciate and execute SARRobustInterferogram
+        app_interf = OTBApplicationWrapper("SARRobustInterferogram")
+        app_interf.set_input_images(insarmaster=burst_reference,
+                                    insarslave=burst_secondary,
+                                    ingrid=grid,
+                                    incoregistratedslave=reramp_coregisted,
+                                    incartmeanmaster=cartmean_reference)
+        app_interf.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                  mlran=mlran,
+                                  mlazi=mlazi,
+                                  marginran=1, marginazi=1,
+                                  gain=self.param_handler.get_param(str(DInSarParamS1IW.INTERF_GAIN)))
+        app_interf.set_output_images(out=interferogram_path)
+        app_interf.execute_app(in_memory=False)
+
+        # Return the output interferogram
+        return interferogram_path
+
+    def _interferogram(self, burst_dir, id_loop, grid, reramp_coregisted):
+        """Build the interferogram
+        """
+        # Get required inputs to launch coregistration
+        # Inputs are list => retrieve only the current index (burst)
+        burst_reference = self._inputs[str(DInSarInputKeysS1IW.BURSTS_REFERENCE)][id_loop]
+        burst_secondary = self._inputs[str(DInSarInputKeysS1IW.BURSTS_SECONDARY)][id_loop]
+        cartmean_reference = self._inputs[str(DInSarInputKeysS1IW.CARTESIAN_ESTIMATION_REFERENCE)][id_loop]
+
+        # Get output path
+        output_dir = burst_dir
+        esd_nbiter = self.param_handler.get_param(str(DInSarParamS1IW.ESD_ITER))
+        if esd_nbiter > 0:
+            output_dir = os.path.join(burst_dir, "esd")
+
+        # pylint: disable=no-member
+        interferogram_path = os.path.join(output_dir, self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key()))
+        # pylint: enable=no-member
+
+        # Define some hard-coded parameters
+        # ml factors have to be 1x1 here to call esd, then.
+        # margin ran and azi (use to apply an average to the final value) set to 1
+        mlran = 1
+        mlazi = 1
+
+        # Instanciate and execute SARRobustInterferogram
+        app_interf = OTBApplicationWrapper("SARRobustInterferogram")
+        app_interf.set_input_images(insarmaster=burst_reference,
+                                    insarslave=burst_secondary,
+                                    ingrid=grid,
+                                    incoregistratedslave=reramp_coregisted,
+                                    incartmeanmaster=cartmean_reference)
+        app_interf.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                  mlran=mlran,
+                                  mlazi=mlazi,
+                                  marginran=1, marginazi=1,
+                                  gain=self.param_handler.get_param(str(DInSarParamS1IW.INTERF_GAIN)))
+        app_interf.set_output_images(out=interferogram_path)
+        app_interf.execute_app(in_memory=False)
+
+        # Return the output interferogram
+        return interferogram_path
+
+    def _concatenate(self, output_dir, interferogram_list, first_burst_index):
+        """Concatenate all interferograms
+        """
+        # Get required inputs
+        reference_image = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_IMAGE)]
+        reference_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_DIR)]
+
+        # Get output path
+        # pylint: disable=no-member
+        concatenate_path = os.path.join(output_dir, self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_CONCATENATE.get_key()))
+        # pylint: enable=no-member
+
+        # Instanciate and execute SARRobustInterferogram
+        app_concatenate = OTBApplicationWrapper("SARConcatenateBursts")
+        app_concatenate.set_input_images(insar=os.path.join(reference_dir, reference_image),
+                                         il=interferogram_list)
+        app_concatenate.set_parameters(burstindex=first_burst_index)
+        app_concatenate.set_output_images(out=concatenate_path)
+        app_concatenate.execute_app(in_memory=False)
+
+        # Return the output interferogram
+        return concatenate_path
+
+    def execute_one_burst(self, burst_dir, id_loop):
+        """Execute DInSAR chain for each burst
+        """
+        #self.file_handler_secondary.create_intermediate_names(burst_id=burst_id)
+        grid = self._deformation_grid(burst_dir, id_loop)
+
+        coregistred_image = self._coregistration(burst_dir, id_loop, grid)
+
+        reramp_coregistred = self._reramp_coregistred(burst_dir, id_loop, grid, coregistred_image)
+
+        interferogram = self._interferogram(burst_dir, id_loop, grid, reramp_coregistred)
+
+        return grid, reramp_coregistred, interferogram
+
+    def _esd_estimation(self, burst_dir, id_loop, burst_id,
+                        interferogram_list):
+        """ESD processing
+        """
+        reference_image = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_IMAGE)]
+        reference_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_DIR)]
+
+        esd_path_dummy = os.path.join(burst_dir, "esdout.tif")
+
+        app_esd = OTBApplicationWrapper("SARESD")
+        app_esd.set_input_images(insar=os.path.join(reference_dir, reference_image),
+                                 ininterfup=interferogram_list[id_loop],
+                                 ininterflow=interferogram_list[id_loop+1])
+        app_esd.set_parameters(burstindex=burst_id, threshold=0.3, mlazi=1)
+        app_esd.set_output_images(out=esd_path_dummy)
+        app_esd.execute_app(in_memory=True)
+
+        return app_esd.get_output_float_parameter("azishift")
+
+    def _esd_shift(self, burst_id, id_loop, last_burst,
+                   azimut_shift_esd_global, azimut_shift_esd):
+        """Adjust azimut shift according to the burst_id
+        """
+        # Adjust azimut shift according to the burstId
+        azi_shift = 0.
+        last_burst_1 = last_burst-1
+        if int(burst_id) == (last_burst_1):
+            # Only accumulation between iterations
+            azimut_shift_esd_global[id_loop] += azimut_shift_esd[id_loop]
+            azi_shift = azimut_shift_esd_global[id_loop]
+        elif int(burst_id) == last_burst:
+            # Same as the last_burst -1
+            azi_shift = azimut_shift_esd_global[id_loop - 1]
+        else:
+            # Accumulation of means between the current burstId and the next index
+            azimut_shift_esd_global[id_loop] += ((azimut_shift_esd[id_loop] +
+                                                  azimut_shift_esd[id_loop+1])/2)
+            azi_shift = azimut_shift_esd_global[id_loop]
+
+        return azi_shift
+
+    def _esd_correction(self, burst_dir, id_loop, grid_list, azimut_shift):
+        """ Apply the azimut shift on grids and re-estimate an interferogram
+        """
+        print(azimut_shift)
+
+        # retrieve inputs
+        deramp_reference = self._inputs[str(DInSarInputKeysS1IW.DERAMP_REFERENCE)][id_loop]
+        deramp_secondary = self._inputs[str(DInSarInputKeysS1IW.DERAMP_SECONDARY)][id_loop]
+        doppler0_secondary = self._inputs[str(DInSarInputKeysS1IW.DOP0_SECONDARY)][id_loop]
+        burst_reference = self._inputs[str(DInSarInputKeysS1IW.BURSTS_REFERENCE)][id_loop]
+        burst_secondary = self._inputs[str(DInSarInputKeysS1IW.BURSTS_SECONDARY)][id_loop]
+        cartmean_reference = self._inputs[str(DInSarInputKeysS1IW.CARTESIAN_ESTIMATION_REFERENCE)][id_loop]
+
+        # Define dummy names (in memory Pipeline)
+        grid_path_dummy = os.path.join(burst_dir, "grid.tif")
+        coregistred_path_dummy = os.path.join(burst_dir, "coregistrated.tif")
+        deramp_path_dummy = os.path.join(burst_dir, "reramp.tif")
+
+        # Get output path for interferogram
+        output_dir = os.path.join(burst_dir, "esd")
+
+        # pylint: disable=no-member
+        interferogram_path = os.path.join(output_dir,
+                                          self.file_handler_secondary.get_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key()))
+        # pylint: enable=no-member
+
+        print(interferogram_path)
+
+        # Apply the offset on the deformation grid (an offset only in azimut)
+        app_grid = OTBApplicationWrapper("SARGridOffset")
+        app_grid.set_input_images(ingrid=grid_list[id_loop])
+        app_grid.set_parameters(offsetran=0, offsetazi=azimut_shift)
+        app_grid.set_output_images(out=grid_path_dummy)
+        app_grid.execute_app(in_memory=True)
+
+        # Define some hard-coded parameters
+        nb_ramps = 256*2*10+1
+        tiles_size = 50
+        margin = 7
+        reramp = "true"
+        shift = "true"
+        marginran = 1
+        marginazi = 1
+        # ml factors have to be 1x1 to call esd, then.
+        mlran = 1
+        mlazi = 1
+
+        # Instanciate and execute SARCoRegistration
+        app_core = OTBApplicationWrapper("SARCoRegistration")
+        app_core.set_input_images(insarmaster=deramp_reference,
+                                  insarslave=deramp_secondary,
+                                  ingrid=app_grid.get_output_image("out"))
+        app_core.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                doppler0=doppler0_secondary,
+                                sizetiles=tiles_size,
+                                margin=margin,
+                                nbramps=nb_ramps)
+        app_core.set_output_images(out=coregistred_path_dummy)
+        app_core.execute_app(in_memory=True)
+
+        # Instanciate and execute SARDeramp
+        app_deramp = OTBApplicationWrapper("SARDeramp")
+        app_deramp.set_input_images(in_=app_core.get_output_image("out"),
+                                    ingrid=app_grid.get_output_image("out"),
+                                    inslave=deramp_secondary)
+        app_deramp.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                  reramp=reramp, shift=shift)
+        app_deramp.set_output_images(out=deramp_path_dummy)
+        app_deramp.execute_app(in_memory=True)
+
+        # Instanciate and execute SARRobustInterferogram
+        app_interf = OTBApplicationWrapper("SARRobustInterferogram")
+        app_interf.set_input_images(insarmaster=burst_reference,
+                                    insarslave=burst_secondary,
+                                    ingrid=app_grid.get_output_image("out"),
+                                    incoregistratedslave=app_deramp.get_output_image("out"),
+                                    incartmeanmaster=cartmean_reference)
+        app_interf.set_parameters(gridsteprange=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_RAN)),
+                                  gridstepazimut=self.param_handler.get_param(str(DInSarParamS1IW.GRID_STEP_AZI)),
+                                  mlran=mlran,
+                                  mlazi=mlazi,
+                                  marginran=marginran, marginazi=marginazi,
+                                  gain=self.param_handler.get_param(str(DInSarParamS1IW.INTERF_GAIN)))
+        app_interf.set_output_images(out=interferogram_path)
+        app_interf.execute_app(in_memory=False)
+
+        # Return the output interferogram
+        return interferogram_path
+
+    def _esd_loop(self, esd_nbiter, burst_ids_list, output_dir,
+                  grid_list, interferogram_list):
+        """ESD processing
+        """
+        # Check the number of burst
+        nb_burst = len(grid_list)
+
+        if nb_burst < 2:
+            print("At least two burst are required to laucnh esd processing")
+            return 1
+
+        # Empty shift
+        azimut_shift_esd = []
+        azimut_shift_esd_global = [0.] * (len(burst_ids_list))
+
+        current_interferogram_list = interferogram_list
+
+        # ESD Loop
+        for iter_esd in range(1, esd_nbiter+1):
+            # Clear all azimut shifts
+            azimut_shift_esd[:] = []
+
+            # loop on burst
+            last_burst = burst_ids_list[-1]
+
+            # First, we need to estimate for each burst the current shitf to apply on azimut
+            for id_loop in range(0, len(burst_ids_list)):
+                burst_id = burst_ids_list[id_loop]
+
+                burst_dir = os.path.join(output_dir, "burst" + str(burst_id))
+
+                if burst_id < last_burst:
+                    azimut_shift_esd_current = self._esd_estimation(burst_dir,
+                                                                    id_loop,
+                                                                    burst_id,
+                                                                    current_interferogram_list)
+
+                    azimut_shift_esd.append(azimut_shift_esd_current)
+
+            # ReInit interferogram_list (new ref) to update the current interferograms for esd loop
+            current_interferogram_list = []
+            # ESD Correction for each burst
+            for id_loop in range(0, len(burst_ids_list)):
+                burst_id = burst_ids_list[id_loop]
+
+                burst_dir = os.path.join(output_dir, "burst" + str(burst_id))
+
+                # Then, the azi_shift is calculated for each burst following next burst
+                azi_shift = self._esd_shift(burst_id, id_loop, int(last_burst),
+                                            azimut_shift_esd_global, azimut_shift_esd)
+
+                # Change interferogram names
+                ext = "_iter" + str(iter_esd)
+                self.file_handler_secondary.create_intermediate_names(burst_id=str(burst_id))
+                # pylint: disable=no-member
+                self.file_handler_secondary.add_extension_to_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key(), ext)
+                # pylint: enable=no-member
+
+                # Eventually, apply the correction and re-build an interferogram
+                interferogram_path = self._esd_correction(burst_dir, id_loop,
+                                                          grid_list, azi_shift)
+                current_interferogram_list.append(interferogram_path)
+
+        return current_interferogram_list
+
+
+    def execute(self):
+        """DInSar chain for S1IW
+
+        xx applications are called here :
+        These applications have the self._image as input and put outputs in self._output_dir
+        """
+
+        # retrieve input : output_dir
+        output_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)]
+
+        # Retrieve the specific parameters :
+        # burst_ids : id of the burst
+        # burst_to_process : burst to extract and then to process
+        # (can be different for secondary image if burst ids
+        # betwwen reference and secondary image do not match)
+        burst_ids_list = self.param_handler.get_param(str(DInSarParamS1IW.BURSTIDS))
+
+        # Empty lists
+        grid_list = []
+        reramp_coregistred_list = []
+        interferogram_list = []
+
+        # loop on burst
+        for id_loop in range(0, len(burst_ids_list)):
+            burst_id = burst_ids_list[id_loop]
+
+            # Init filenames (mostly use filename_secondary)
+            self.file_handler_secondary.create_intermediate_names(burst_id=str(burst_id))
+            self.file_handler_reference.create_intermediate_names(burst_id=str(burst_id))
+
+            ext = "_iter" + str(0)
+            # pylint: disable=no-member
+            self.file_handler_secondary.add_extension_to_filename(DInSarFilenames.FILES_AFTER_INTERF.get_key(), ext)
+            # pylint: enable=no-member
+
+            # Output directory for the current burst
+            burst_dir = os.path.join(output_dir, "burst" + str(burst_id))
+
+            print(self.file_handler_secondary.filenames)
+            #print(self.file_handler_reference.filenames)
+
+            # Process the current burst
+            grid, reramp_coregistred, interferogram = self.execute_one_burst(burst_dir, id_loop)
+
+            grid_list.append(grid)
+            reramp_coregistred_list.append(reramp_coregistred)
+            interferogram_list.append(interferogram)
+
+        # ESD processing
+        interferogram_esd_list = interferogram_list
+        esd_nbiter = self.param_handler.get_param(str(DInSarParamS1IW.ESD_ITER))
+        if esd_nbiter > 0:
+            interferogram_esd_list = self._esd_loop(esd_nbiter,
+                                                    burst_ids_list,
+                                                    output_dir,
+                                                    grid_list,
+                                                    interferogram_list)
+
+        # Concatenate bursts
+        interferogram = self._concatenate(output_dir, interferogram_esd_list, burst_ids_list[0])
+
+        # Assign outputs
+        self._outputs[str(DInSarOutputKeysS1IW.GRIDS)] = grid_list
+        self._outputs[str(DInSarOutputKeysS1IW.COREGISTRATED_SECONDARY_RERAMP)] = reramp_coregistred_list
+        self._outputs[str(DInSarOutputKeysS1IW.INTERFEROS)] = interferogram_esd_list
+        self._outputs[str(DInSarOutputKeysS1IW.INTERFERO)] = interferogram
diff --git a/python_src/lib/processing/DiapOTBProcessingFactory.py b/python_src/lib/processing/DiapOTBProcessingFactory.py
new file mode 100644
index 0000000000000000000000000000000000000000..08caba659436b5f7b87dcb1cbeb7b4ee16385bf8
--- /dev/null
+++ b/python_src/lib/processing/DiapOTBProcessingFactory.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Factory to create every kind of processing
+"""
+
+from .PreProcessing import PreProcessing
+from .Ground import Ground
+from .DInSAR import DInSAR
+from .PostProcessing import PostProcessing
+from .core.DiapOTBEnums import ChainNames, ChainModes
+from .core.DiapOTBExceptions import DiapOTBException
+
+
+# Factory
+class DiapOTBProcessingFactory():
+    """Factory to create all processing
+    """
+    # Static dictionary to match the chain_name with a class
+    find_class = {str(ChainNames.PRE_PROCESSING): PreProcessing,
+                  str(ChainNames.GROUND): Ground,
+                  str(ChainNames.DINSAR): DInSAR,
+                  str(ChainNames.POST_PROCESSING) : PostProcessing}
+
+    def __init__(self, mode=ChainModes.OTHERS):
+        self._mode = mode
+
+    def create_processing(self, chain_name, **kwargs):
+        """Classmethod to create a given processing
+        """
+        # Check if chain_name matchs to a processing
+        if chain_name not in ChainNames.list():
+            raise DiapOTBException("Unkwown chain : " + str(chain_name) +
+                                   ". Only these following chains are available: " +
+                                   str(ChainNames.list()))
+
+        # Add mode
+        kwargs["mode"] = self._mode
+        instance = self.find_class[str(chain_name)](**kwargs)
+        return instance
diff --git a/python_src/lib/processing/Ground.py b/python_src/lib/processing/Ground.py
new file mode 100644
index 0000000000000000000000000000000000000000..df41c6bbd26d7c41f681ad9ef24e69855e7b7e7e
--- /dev/null
+++ b/python_src/lib/processing/Ground.py
@@ -0,0 +1,311 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Ground chain
+"""
+
+import os
+
+from .core.DiapOTBProcessing import DiapOTBProcessingSingleImage, \
+    ParamHandler, FilenamesHandler, \
+    ExecutorSingleImage
+from .core.DiapOTBEnums import ExtendedEnum, ChainModes, \
+    ExtPosition, FilenamesEnum, extend_enum, \
+    RequiredKeysForSingleImageProcessing, DefaultEnum
+from .core.DiapOTBExceptions import DiapOTBException
+
+from .core.ApplicationWrapper import OTBApplicationWrapper
+
+
+# pylint: disable=too-few-public-methods
+# Specific enums
+class GroundParamOthers(ExtendedEnum):
+    """Define each required parameter for the Ground chain (mode OTHERS).
+
+    Values are the keys looked up in the parameter dictionary.
+    """
+    XYZ = "withxyz"                    # SARDEMProjection 'withxyz' flag
+    NO_DATA = "nodata"                 # no-data value for SARDEMProjection
+    CARTESIAN_ESTIMATION = "withcart"  # enable SARCartesianMeanEstimation
+
+
+@extend_enum(GroundParamOthers)
+class GroundParamS1IW(ExtendedEnum):
+    """Define each required parameter for the Ground chain, S1 IW mode
+    (keys of GroundParamOthers + the following ones)
+    """
+    BURSTIDS = "burst_ids"  # ids of the bursts to process
+
+
+class GroundDefaultValue(DefaultEnum):
+    """Define some default values or redirect to other values.
+    The following parameters are optional (the others are mandatory).
+    Each member is a (parameter key, default value) pair.
+    """
+    XYZ = (str(GroundParamOthers.XYZ), True)
+    NO_DATA = (str(GroundParamOthers.NO_DATA), -32768)
+    CARTESIAN_ESTIMATION = (str(GroundParamOthers.CARTESIAN_ESTIMATION), True)
+
+
+class GroundFilenames(FilenamesEnum):
+    """Define keys for intermediate/output filenames of the Ground chain.
+    Each member holds 3 str : a key, the extension to add and the position
+    of the extension used to build the filename.
+    """
+    FILES_AFTER_DEMPROJ = ("dem_proj_files", "demProj_",
+                           str(ExtPosition.PREFIX))
+    # NOTE(review): member name misspells "CARTESIAN" — kept as-is for compatibility
+    FILES_AFTER_CARTESAIN_ESTIMATION = ("cart_files", "cartMean_",
+                                        str(ExtPosition.PREFIX))
+
+
+class GroundInputKeysOthers(ExtendedEnum):
+    """Define input keys for the Ground chain (mode OTHERS)
+    """
+    DEM = "dem"
+
+
+class GroundInputKeysS1IW(ExtendedEnum):
+    """Define input keys for the Ground chain, S1 IW mode
+    """
+    DEM = "dem"
+    DERAMP = "deramped_burst_list"  # deramped bursts, aligned by index with burst ids
+
+
+class GroundOutputKeys(ExtendedEnum):
+    """Define output keys for the Ground chain
+    """
+    DEMPROJ = "dem_proj_list"
+    CARTESIAN_ESTIMATION = "cart_list"
+
+# pylint: enable=too-few-public-methods
+
+
+# Ground class
+class Ground(DiapOTBProcessingSingleImage):
+    """Use the module to launch Ground chain.
+
+    main function : execute
+    """
+
+    def __init__(self, **kwargs):
+        # Base constructor to init required elts such as image/dir, output_dir and parameters
+        super().__init__(**kwargs)
+
+        # Init the specific arguments for the PreProcessing chain
+        self._name = "Ground"
+        self._applications = ["SARDEMProjection", "SARCartesianMeanEstimation"]
+
+        self._mode = str(ChainModes.OTHERS)
+        if "mode" in kwargs and str(kwargs["mode"]) in ChainModes.list():
+            self._mode = str(kwargs["mode"])
+
+        print(self._mode)
+
+        # Init Handlers according to the mode
+        param_enum = GroundParamOthers
+        file_enum = GroundFilenames
+        default_param_enum = GroundDefaultValue
+        self._inputs_list = GroundInputKeysOthers.list()
+
+        if self._mode == str(ChainModes.S1_IW):
+            param_enum = GroundParamS1IW
+            self._inputs_list = GroundInputKeysS1IW.list()
+
+        self.param_handler = ParamHandler(param_enum,
+                                          self._param,
+                                          default_param_enum)
+
+        self.file_handler = FilenamesHandler(file_enum,
+                                             self._image_base,
+                                             self._mode)
+
+        # Get from parameter dictionary each argument
+        self.param_handler.check_param()
+
+    def retrieve_output(self, key):
+        """Retrieve a given output of the Ground chain
+        """
+        if not isinstance(key, GroundOutputKeys) and key not in GroundOutputKeys.list():
+            raise DiapOTBException("The current key is not a available "
+                                   "output key for PreProcessing chain")
+
+        return self._dict_outputs[str(key)]
+
+    # Process functions
+    def execute(self, **kwargs):
+        """ Ground chain
+        """
+        if str(self._mode) == str(ChainModes.OTHERS):
+            self._input_enum = GroundInputKeysOthers
+        else:
+            self._input_enum = GroundInputKeysS1IW
+
+        self._executor_builder.add_mode(ChainModes.OTHERS,
+                                        ExecutorGroundOthers)
+
+        self._executor_builder.add_mode(ChainModes.S1_IW,
+                                        ExecutorGroundS1IW)
+
+        super().execute(**kwargs)
+
+
+# Executors, one per mode
+class ExecutorGroundOthers(ExecutorSingleImage):
+    """Execute processing for the Ground chain, mode OTHERS (S1SM/CSK sensors)
+    """
+    def execute(self):
+        """Ground chain for S1SM and CSK sensors
+
+        Two applications are called here : SARDEMProjection and (optionally)
+        SARCartesianMeanEstimation, both on the single input image.
+        Outputs are written into the output directory given in self._inputs.
+        """
+
+        # retrieve input : image and output_dir
+        output_dir = self._inputs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)]
+
+
+        # Retrieve parameters
+        withxyz = self.param_handler.get_param(str(GroundParamOthers.XYZ))
+        no_data = self.param_handler.get_param(str(GroundParamOthers.NO_DATA))
+        withcart = self.param_handler.get_param(str(GroundParamOthers.CARTESIAN_ESTIMATION))
+
+        self.file_handler.create_intermediate_names()
+
+        # SARDEMProjection
+        # pylint: disable=no-member
+        dem_proj_path = os.path.join(output_dir,
+                                     self.file_handler.get_filename(GroundFilenames.FILES_AFTER_DEMPROJ.get_key()))
+        # pylint: enable=no-member
+        # adapt_image_format to handle h5 dataset (CSK sensor)
+        app_dem_proj = OTBApplicationWrapper("SARDEMProjection")
+        app_dem_proj.set_input_images(insar=self.image_path(),
+                                      indem=self._inputs[str(GroundInputKeysOthers.DEM)])
+        app_dem_proj.set_parameters(withxyz=withxyz, nodata=no_data)
+        app_dem_proj.set_output_images(out=dem_proj_path)
+        app_dem_proj.execute_app(in_memory=False)
+
+        # DEM scan directions computed by SARDEMProjection, reused as inputs
+        # of SARCartesianMeanEstimation below
+        dir_dem_c = app_dem_proj.get_output_int_parameter("directiontoscandemc")
+        dir_dem_l = app_dem_proj.get_output_int_parameter("directiontoscandeml")
+
+        self._outputs[str(GroundOutputKeys.DEMPROJ)] = [dem_proj_path]
+
+        # SARCartesianMeanEstimation (only when the 'withcart' parameter is set)
+        # adapt_image_format to handle h5 dataset (CSK sensor)
+        if withcart:
+            # pylint: disable=no-member
+            cart_mean_path = os.path.join(output_dir,
+                                          self.file_handler.get_filename(GroundFilenames.FILES_AFTER_CARTESAIN_ESTIMATION.get_key()))
+            # pylint: enable=no-member
+            app_cart_mean = OTBApplicationWrapper("SARCartesianMeanEstimation")
+            app_cart_mean.set_input_images(insar=self.image_path(),
+                                           indem=self._inputs[str(GroundInputKeysOthers.DEM)],
+                                           indemproj=dem_proj_path)
+            app_cart_mean.set_parameters(indirectiondemc=dir_dem_c,
+                                         indirectiondeml=dir_dem_l,
+                                         mlran=1, mlazi=1)
+            app_cart_mean.set_output_images(out=cart_mean_path)
+            app_cart_mean.execute_app(in_memory=False)
+
+            self._outputs[str(GroundOutputKeys.CARTESIAN_ESTIMATION)] = [cart_mean_path]
+
+
+
+class ExecutorGroundS1IW(ExecutorSingleImage):
+    """Execute processing for ground chain mode S1SM-CSK
+    """
+
+    def execute_one_burst(self, deramp_in, output_dir,
+                          burst_id):
+        """Execute grpind chain for each burst
+        """
+
+        # Retrieve parameters
+        withxyz = self.param_handler.get_param(str(GroundParamS1IW.XYZ))
+        no_data = self.param_handler.get_param(str(GroundParamS1IW.NO_DATA))
+        withcart = self.param_handler.get_param(str(GroundParamS1IW.CARTESIAN_ESTIMATION))
+
+
+        self.file_handler.create_intermediate_names(burst_id=burst_id)
+
+        # Output directory for the current burst
+        burst_dir = os.path.join(output_dir, "burst" + str(burst_id))
+
+        # Input image for applications : deramped burst
+        in_image = deramp_in
+
+        # SARDEMProjection
+        # pylint: disable=no-member
+        dem_proj_path = os.path.join(burst_dir,
+                                     self.file_handler.get_filename(GroundFilenames.FILES_AFTER_DEMPROJ.get_key()))
+        # pylint: enable=no-member
+        app_dem_proj = OTBApplicationWrapper("SARDEMProjection")
+        app_dem_proj.set_input_images(insar=in_image,
+                                      indem=self._inputs[str(GroundInputKeysS1IW.DEM)])
+        app_dem_proj.set_parameters(withxyz=withxyz, nodata=no_data)
+        app_dem_proj.set_output_images(out=dem_proj_path)
+        app_dem_proj.execute_app(in_memory=False)
+
+        dir_dem_c = app_dem_proj.get_output_int_parameter("directiontoscandemc")
+        dir_dem_l = app_dem_proj.get_output_int_parameter("directiontoscandeml")
+
+        print("dir_dem_l" + str(dir_dem_l))
+        print("dir_dem_c" + str(dir_dem_c))
+
+        # SARCartesianMeanEstimation
+        cart_mean_path = ""
+        if withcart:
+            # pylint: disable=no-member
+            cart_mean_path = os.path.join(burst_dir,
+                                          self.file_handler.get_filename(GroundFilenames.FILES_AFTER_CARTESAIN_ESTIMATION.get_key()))
+            # pylint: enable=no-member
+            app_cart_mean = OTBApplicationWrapper("SARCartesianMeanEstimation")
+            app_cart_mean.set_input_images(insar=in_image,
+                                           indem=self._inputs[str(GroundInputKeysS1IW.DEM)],
+                                           indemproj=dem_proj_path)
+            app_cart_mean.set_parameters(indirectiondemc=dir_dem_c,
+                                         indirectiondeml=dir_dem_l,
+                                         mlran=1, mlazi=1)
+            app_cart_mean.set_output_images(out=cart_mean_path)
+            app_cart_mean.execute_app(in_memory=False)
+
+        # Return outputs
+        return dem_proj_path, cart_mean_path
+
+
+    def execute(self):
+        """PreProcessing chain for S1SM and CSK sensors
+
+        Two applications are called here : SARDEMProjection and SARCartesianMeanEstimation on the single input image
+        These applications have the self._image as input and put outputs in self._output_dir
+        """
+
+        # retrieve input : output_dir
+        output_dir = self._inputs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)]
+
+        # Retrieve the specific parameters :
+        # burst_ids : id of the burst
+        # burst_to_process : burst to extract and then to process
+        # (can be different for secondary image if burst ids betwwen reference and secondary image do not match)
+
+        burst_ids_list = self.param_handler.get_param(str(GroundParamS1IW.BURSTIDS))
+
+        # Empty lists
+        dem_proj_list = []
+        cart_mean_list = []
+
+        # loop on burst
+        for id_loop in range(0, len(burst_ids_list)):
+            burst_id = burst_ids_list[id_loop]
+
+            # retrieve the current deramp file for the given index
+            deramp_in = self._inputs[str(GroundInputKeysS1IW.DERAMP)][id_loop]
+
+            # Process the current burst
+            dem_proj_path, cart_mean_path = self.execute_one_burst(deramp_in,
+                                                                   output_dir,
+                                                                   burst_id)
+
+            dem_proj_list.append(dem_proj_path)
+            if cart_mean_path != "":
+                cart_mean_list .append(cart_mean_path)
+
+        self._outputs[str(GroundOutputKeys.DEMPROJ)] = dem_proj_list
+        self._outputs[str(GroundOutputKeys.CARTESIAN_ESTIMATION)] = cart_mean_list
diff --git a/python_src/lib/processing/PostProcessing.py b/python_src/lib/processing/PostProcessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..310cd8386b3f66ac29aa2c8738c48fe70e23c70d
--- /dev/null
+++ b/python_src/lib/processing/PostProcessing.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Post Processing chain
+"""
+
+import os
+
+from .core.DiapOTBProcessing import DiapOTBProcessingDualImages, \
+    ParamHandler, FilenamesHandler, \
+    ExecutorDualImages
+from .core.DiapOTBEnums import ExtendedEnum, ChainModes, \
+    ExtPosition, FilenamesEnum, \
+    RequiredKeysForDualImagesProcessing, extend_enum, DefaultEnum
+from .core.DiapOTBExceptions import DiapOTBException
+
+from .PreProcessing import PreProcessingOutputKeys
+from .Ground import GroundOutputKeys
+from .DInSAR import DInSarOutputKeysOthers, DInSarOutputKeysS1IW
+
+from .core.ApplicationWrapper import OTBApplicationWrapper
+from .core.ConfigFile import ConfigParamPattern
+
+# pylint: disable=too-few-public-methods
+# Specific enums
+class PostProcessingParamS1IW(ExtendedEnum):
+    """Define each required parameter for the PostProcessing chain, S1 IW mode.
+
+    Values are the keys looked up in the parameter dictionary.
+    """
+    ML_FILT_AZI = str(ConfigParamPattern.FILTMLAZI)
+    ML_FILT_RAN = str(ConfigParamPattern.FILTMLRAN)
+    ML_FILT_GAIN = "ml_filt_interf_gain"
+    FILT_ALPHA = str(ConfigParamPattern.FILTALPHA)
+    INTERF_GAIN = str(ConfigParamPattern.INTERFGAIN)
+
+
+@extend_enum(PostProcessingParamS1IW)
+class PostProcessingParamOthers(ExtendedEnum):
+    """Define each required parameter for the PostProcessing chain, mode OTHERS
+    (S1 IW keys + the following ones)
+    """
+    GRID_STEP_RAN = str(ConfigParamPattern.GRIDSTEPRAN)
+    GRID_STEP_AZI = str(ConfigParamPattern.GRIDSTEPAZI)
+
+
+class PostProcessingDefaultValue(DefaultEnum):
+    """Define some default values or redirect to other values (e.g.
+    ML_FILT_GAIN defaults to the INTERFGAIN parameter key).
+    The following parameters are optional (the others are mandatory).
+    Each member is a (parameter key, default value) pair.
+    """
+    ML_FILT_AZI = (str(PostProcessingParamS1IW.ML_FILT_AZI), 3)
+    ML_FILT_RAN = (str(PostProcessingParamS1IW.ML_FILT_RAN), 3)
+    ML_FILT_GAIN = (str(PostProcessingParamS1IW.ML_FILT_GAIN),  str(ConfigParamPattern.INTERFGAIN))
+    FILT_ALPHA = (str(PostProcessingParamS1IW.FILT_ALPHA), 0.7)
+
+class PostProcessingFilenames(FilenamesEnum):
+    """Define keys for intermediate/output filenames of the PostProcessing chain.
+    Each member holds 3 str : a key, the extension to add and the position
+    of the extension used to build the filename.
+    """
+    FILES_AFTER_PHASE_FILTERING = ("phase_files", "filfPhaCoh", str(ExtPosition.EXCLUSIF))
+    FILES_AFTER_FILTERING = ("filtered_files", "filtered_interferogram",
+                             str(ExtPosition.EXCLUSIF))
+
+
+class PostProcessingInputKeysOthers(ExtendedEnum):
+    """Define input keys for the PostProcessing chain (mode OTHERS).
+    Keys are built from the Ground and DInSAR chain output keys.
+    """
+    CARTESIAN_ESTIMATION_REFERENCE = str(GroundOutputKeys.CARTESIAN_ESTIMATION) + "_reference"
+    COREGISTRATED_SECONDARY = str(DInSarOutputKeysOthers.COREGISTRATED_SECONDARY)
+    GRIDS = str(DInSarOutputKeysOthers.GRIDS)
+
+
+class PostProcessingInputKeysS1IW(ExtendedEnum):
+    """Define input keys for the PostProcessing chain, S1 IW mode
+    """
+    INTERFERO = str(DInSarOutputKeysS1IW.INTERFERO)
+
+
+class PostProcessingOutputKeys(ExtendedEnum):
+    """Define output keys for the PostProcessing chain
+    """
+    FILT_INTERFERO = "filtered_interferogram"
+
+# pylint: enable=too-few-public-methods
+
+
+class PostProcessing(DiapOTBProcessingDualImages):
+    """Use the module to launch PostProcessing chain.
+
+    main function : execute
+    """
+    def __init__(self, **kwargs):
+                # Base constructor to init required elts such as image/dir, output_dir and parameters
+        super().__init__(**kwargs)
+
+        # Init the specific arguments for the DINSAR chain
+        self._name = "PostProcessing"
+        self._applications = ["SAR"]
+
+        self._mode = str(ChainModes.OTHERS)
+        if "mode" in kwargs and str(kwargs["mode"]) in ChainModes.list():
+            self._mode = str(kwargs["mode"])
+
+        print(self._mode)
+
+        # Init Handlers according to the mode
+        param_enum = PostProcessingParamOthers
+        file_enum = PostProcessingFilenames
+        default_param_enum = PostProcessingDefaultValue
+        self._inputs_list = PostProcessingInputKeysOthers.list()
+
+        if self._mode == str(ChainModes.S1_IW):
+            self._inputs_list = PostProcessingInputKeysS1IW.list()
+            param_enum = PostProcessingParamS1IW
+
+        print(self._inputs_list)
+
+        self.param_handler = ParamHandler(param_enum,
+                                          self._param,
+                                          default_param_enum)
+
+        self.file_handler_reference = FilenamesHandler(file_enum,
+                                                       self._reference_base,
+                                                       self._mode)
+
+        self.file_handler_secondary = FilenamesHandler(file_enum,
+                                                       self._secondary_base,
+                                                       self._mode)
+
+        # Get from parameter dictionary each argument
+        self.param_handler.check_param()
+
+    def retrieve_output(self, key):
+        """Retrieve a given output of PostProcessing chain
+        """
+        if not isinstance(key, PostProcessingOutputKeys) and \
+           key not in PostProcessingOutputKeys.list():
+            raise DiapOTBException("The current key is not a available "
+                                   "output key for PostProcessing chain")
+
+        return self._dict_outputs[str(key)]
+
+
+    # Process functions
+    def execute(self, **kwargs):
+        """ PsotProcessing chain
+        """
+
+        if str(self._mode) == str(ChainModes.OTHERS):
+            self._input_enum = PostProcessingInputKeysOthers
+        else:
+            self._input_enum = PostProcessingInputKeysS1IW
+
+        self._executor_builder.add_mode(ChainModes.OTHERS,
+                                        ExecutorPostProcessingOthers)
+
+        self._executor_builder.add_mode(ChainModes.S1_IW,
+                                        ExecutorPostProcessingS1IW)
+
+        super().execute(**kwargs)
+
+
+# Executors, one per mode
+class ExecutorPostProcessingOthers(ExecutorDualImages):
+    """Execute processing for the PostProcessing chain, mode OTHERS (S1SM/CSK sensors)
+    """
+
+    def execute(self):
+        """PostProcessing chain for S1SM and CSK sensors
+
+        Four applications are chained here : SARTopographicPhase and
+        SARCompensatedComplex (executed in memory), then SARPhaseFiltering
+        and SARAddBandInterferogram whose outputs are written in the
+        output directory.
+        """
+        # retrieve input : output_dir
+        output_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)]
+
+        self.file_handler_secondary.create_intermediate_names()
+
+        # Retrieve inputs
+        cartmean_reference = self._inputs[str(PostProcessingInputKeysOthers.CARTESIAN_ESTIMATION_REFERENCE)][0]
+        grid = self._inputs[str(PostProcessingInputKeysOthers.GRIDS)][0]
+        coregistred = self._inputs[str(PostProcessingInputKeysOthers.COREGISTRATED_SECONDARY)]
+
+        # Define dummy names (in memory Pipeline)
+        topo_phase_dummy = os.path.join(output_dir, "topo.tif")
+        compensated_complex_dummy = os.path.join(output_dir, "complex.tif")
+
+        # SARTopographicPhase with ml factors set to 1
+        app_topo = OTBApplicationWrapper("SARTopographicPhase")
+        app_topo.set_input_images(insarslave=self.secondary_path(),
+                                  ingrid=grid, incartmeanmaster=cartmean_reference)
+        app_topo.set_parameters(mlran=1,
+                                mlazi=1,
+                                gridsteprange=self.param_handler.get_param(str(PostProcessingParamOthers.GRID_STEP_RAN)),
+                                gridstepazimut=self.param_handler.get_param(str(PostProcessingParamOthers.GRID_STEP_AZI)))
+        app_topo.set_output_images(out=topo_phase_dummy)
+        app_topo.execute_app(in_memory=True)
+
+        # SARCompensatedComplex
+        app_complex = OTBApplicationWrapper("SARCompensatedComplex")
+        app_complex.set_input_images(insarmaster=self.reference_path(),
+                                     insarslave=coregistred, topographicphase=app_topo.get_output_image("out"))
+        app_complex.set_output_images(out=compensated_complex_dummy)
+        app_complex.execute_app(in_memory=True)
+
+        # SARPhaseFiltering
+        # pylint: disable=no-member
+        out_phase = os.path.join(output_dir,
+                                 self.file_handler_secondary.get_filename(PostProcessingFilenames.FILES_AFTER_PHASE_FILTERING.get_key()))
+        # pylint: enable=no-member
+        app_phase = OTBApplicationWrapper("SARPhaseFiltering")
+        app_phase.set_input_images(incomplex=app_complex.get_output_image("out"))
+        app_phase.set_parameters(mlran=self.param_handler.get_param(str(PostProcessingParamOthers.ML_FILT_RAN)),
+                                 mlazi=self.param_handler.get_param(str(PostProcessingParamOthers.ML_FILT_AZI)),
+                                 step=16, sizetiles=64,
+                                 alpha=self.param_handler.get_param(str(PostProcessingParamOthers.FILT_ALPHA)))
+        app_phase.set_output_images(out=out_phase)
+        app_phase.execute_app(in_memory=False)
+
+
+        # SARAddBandInterferogram
+        # pylint: disable=no-member
+        out_filtered = os.path.join(output_dir,
+                                    self.file_handler_secondary.get_filename(PostProcessingFilenames.FILES_AFTER_FILTERING.get_key()))
+        # pylint: enable=no-member
+        app_amp = OTBApplicationWrapper("SARAddBandInterferogram")
+        app_amp.set_input_images(incomplexamp=app_complex.get_output_image("out"), ininterf=out_phase)
+        app_amp.set_parameters(mlran=self.param_handler.get_param(str(PostProcessingParamOthers.ML_FILT_RAN)),
+                               mlazi=self.param_handler.get_param(str(PostProcessingParamOthers.ML_FILT_AZI)),
+                               gain=self.param_handler.get_param(str(PostProcessingParamOthers.ML_FILT_GAIN)))
+        app_amp.set_output_images(out=out_filtered)
+        app_amp.execute_app(in_memory=False)
+
+        # Assign outputs
+        self._outputs[str(PostProcessingOutputKeys.FILT_INTERFERO)] = out_filtered
+
+
+        print(self._inputs)  # NOTE(review): debug print left in place — consider removing
+
+
+class ExecutorPostProcessingS1IW(ExecutorDualImages):
+    """Execute processing for PostProcessing chain mode S1IW
+    """
+    def execute(self):
+        """PostProcessing chain for the S1 IW mode
+
+        Two applications are called here : SARPhaseFiltering then
+        SARAddBandInterferogram on the DInSAR interferogram; outputs are
+        written in the output directory.
+        """
+        # retrieve input : output_dir
+        output_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)]
+
+        self.file_handler_secondary.create_intermediate_names()
+
+        # Retrieve interferogram
+        interferogram = self._inputs[str(PostProcessingInputKeysS1IW.INTERFERO)]
+
+        # SARPhaseFiltering
+        # pylint: disable=no-member
+        out_phase = os.path.join(output_dir,
+                                 self.file_handler_secondary.get_filename(PostProcessingFilenames.FILES_AFTER_PHASE_FILTERING.get_key()))
+        # pylint: enable=no-member
+        app_phase = OTBApplicationWrapper("SARPhaseFiltering")
+        app_phase.set_input_images(ininterf=interferogram)
+        app_phase.set_parameters(mlran=self.param_handler.get_param(str(PostProcessingParamS1IW.ML_FILT_RAN)),
+                                 mlazi=self.param_handler.get_param(str(PostProcessingParamS1IW.ML_FILT_AZI)),
+                                 step=16, sizetiles=64,
+                                 alpha=self.param_handler.get_param(str(PostProcessingParamS1IW.FILT_ALPHA)))
+        app_phase.set_output_images(out=out_phase)
+        app_phase.execute_app(in_memory=False)
+
+
+        # SARAddBandInterferogram
+        # pylint: disable=no-member
+        out_filtered = os.path.join(output_dir,
+                                    self.file_handler_secondary.get_filename(PostProcessingFilenames.FILES_AFTER_FILTERING.get_key()))
+        # pylint: enable=no-member
+        app_amp = OTBApplicationWrapper("SARAddBandInterferogram")
+        app_amp.set_input_images(ininterfamp=interferogram, ininterf=out_phase)
+        # gain is put here to 1 (already applied during interferogram step)
+        app_amp.set_parameters(mlran=self.param_handler.get_param(str(PostProcessingParamS1IW.ML_FILT_RAN)),
+                               mlazi=self.param_handler.get_param(str(PostProcessingParamS1IW.ML_FILT_AZI)),
+                               gain=1)
+        app_amp.set_output_images(out=out_filtered)
+        app_amp.execute_app(in_memory=False)
+
+        # Assign outputs
+        self._outputs[str(PostProcessingOutputKeys.FILT_INTERFERO)] = out_filtered
+
+        print(self._inputs)  # NOTE(review): debug print left in place — consider removing
diff --git a/python_src/lib/processing/PreProcessing.py b/python_src/lib/processing/PreProcessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce974518b376acfec00849b2b5b76c09ee122180
--- /dev/null
+++ b/python_src/lib/processing/PreProcessing.py
@@ -0,0 +1,299 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Pre-processing chain
+"""
+import os
+
+from .core.DiapOTBProcessing import DiapOTBProcessingSingleImage, \
+    ParamHandler, FilenamesHandler, ExecutorSingleImage
+from .core.DiapOTBEnums import ExtendedEnum, ChainModes, \
+    FilenamesEnum, extend_enum, ExtPosition, \
+    RequiredKeysForSingleImageProcessing
+from .core.DiapOTBExceptions import DiapOTBException
+
+from .core.ApplicationWrapper import OTBApplicationWrapper
+from .core.ConfigFile import ConfigParamPattern
+
+
+# pylint: disable=too-few-public-methods
+# Specific enums
+class PreProcessingParamOthers(ExtendedEnum):
+    """Define each required parameter for the PreProcessing S1SM/CSK chain.
+
+    Values mirror the configuration-file keys declared in ConfigParamPattern.
+    """
+    MLAZI = str(ConfigParamPattern.MLAZI)
+    MLRAN = str(ConfigParamPattern.MLRAN)
+    MLGAIN = str(ConfigParamPattern.MLGAIN)
+    DOPFILE = str(ConfigParamPattern.DOPFILE)
+
+
+@extend_enum(PreProcessingParamOthers)
+class PreProcessingParamS1IW(ExtendedEnum):
+    """Define each required parameter for the PreProcessing S1IW chain.
+
+    Extends PreProcessingParamOthers with the burst-selection parameters.
+    """
+    BURSTIDS = "burst_ids"
+    BURSTEXTRACT = "burst_to_extract"
+
+
+class PreProcessingFilenamesS1IW(FilenamesEnum):
+    """Define keys for intermediate/output filenames for the PreProcessing S1IW chain.
+
+    Each member holds 3 str : a key, the extension to add and the position of the
+    extension, used to build the intermediate file name.
+    """
+    BURSTEXTRACT = ("burst_extract_files", "", str(ExtPosition.SUFIX))
+    DERAMP = ("deramp_files", "_deramp", str(ExtPosition.SUFIX))
+    ML = ("ml_files", "_ml", str(ExtPosition.SUFIX))
+
+
+class PreProcessingFilenamesOthers(FilenamesEnum):
+    """Define keys for intermediate/output filenames for the PreProcessing Others chain.
+
+    Each member holds 3 str : a key, the extension to add and the position of the
+    extension, used to build the intermediate file name.
+    """
+    ML = ("ml_files", "_ml", str(ExtPosition.SUFIX))
+
+
+class PreProcessingOutputKeys(ExtendedEnum):
+    """Define output keys for the PreProcessing chain.
+
+    Outputs are stored as lists (one element per burst in S1IW mode,
+    a single element in Others mode).
+    """
+    DOP0 = "doppler_0_list"
+    BURSTS = "burst_list"
+    DERAMP = "deramped_burst_list"
+    ML = "ml_list"
+
+# pylint: enable=too-few-public-methods
+
+# PreProcessing class
+class PreProcessing(DiapOTBProcessingSingleImage):
+    """Use the module to launch Pre_Processing chain.
+
+    main function : execute
+    """
+
+    def __init__(self, **kwargs):
+        # Base constructor to init required elts such as image/dir, output_dir and parameters
+        super().__init__(**kwargs)
+
+        # Init the specific arguments for the PreProcessing chain
+        self._name = "Pre Processing"
+        self._applications = ["SARDoppler0", "SARMultiLook",
+                              "SARDeramp", "SARBurstExtraction"]
+
+        self._mode = str(ChainModes.OTHERS)
+        if "mode" in kwargs and str(kwargs["mode"]) in ChainModes.list():
+            self._mode = str(kwargs["mode"])
+
+        print(self._mode)
+
+        # Init Handlers according to the mode
+        param_enum = PreProcessingParamOthers
+        file_enum = PreProcessingFilenamesOthers
+
+        if self._mode == str(ChainModes.S1_IW):
+            param_enum = PreProcessingParamS1IW
+            file_enum = PreProcessingFilenamesS1IW
+
+        self.param_handler = ParamHandler(param_enum,
+                                          self._param)
+
+        self.file_handler = FilenamesHandler(file_enum,
+                                             self._image_base,
+                                             self._mode)
+
+        # Get from parameter dictionary each argument
+        self.param_handler.check_param()
+
+    def retrieve_output(self, key):
+        """Retrieve a given output of the PreProcessing chain
+        """
+        if not isinstance(key, PreProcessingOutputKeys) and key not in PreProcessingOutputKeys.list():
+            raise DiapOTBException("The current key is not a available "
+                                   "output key for PreProcessing chain")
+
+        return self._dict_outputs[str(key)]
+
+    # Process functions
+    def execute(self, **kwargs):
+        """PreProcessing chain
+        """
+        if str(self._mode) == str(ChainModes.OTHERS):
+            self._input_enum = None
+        else:
+            self._input_enum = None
+
+        self._executor_builder.add_mode(ChainModes.OTHERS,
+                                        ExecutorPreProcessingOthers)
+
+        self._executor_builder.add_mode(ChainModes.S1_IW,
+                                        ExecutorPreProcessingS1IW)
+
+        super().execute(**kwargs)
+
+
+
+# Executors, one per mode
+class ExecutorPreProcessingOthers(ExecutorSingleImage):
+    """Execute processing for the pre-processing chain, mode Others (S1SM / CSK)
+    """
+    def execute(self):
+        """PreProcessing chain for S1SM and CSK sensors
+
+        Two applications are called here : SARDoppler0 and SARMultiLook on the single input image
+        These applications have the self._image as input and put outputs in self._output_dir
+        """
+        # retrieve input : output directory for every produced file
+        output_dir = self._inputs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)]
+
+        # retrieve parameters : multilook factors/gain and doppler output filename
+        ml_azimut = self.param_handler.get_param(str(PreProcessingParamOthers.MLAZI))
+        ml_range = self.param_handler.get_param(str(PreProcessingParamOthers.MLRAN))
+        ml_gain = self.param_handler.get_param(str(PreProcessingParamOthers.MLGAIN))
+        dop_file = self.param_handler.get_param(str(PreProcessingParamOthers.DOPFILE))
+
+        # Build the intermediate filenames (multilook factors appear in the names)
+        self.file_handler.create_intermediate_names(ml_azimut=ml_azimut,
+                                                    ml_range=ml_range)
+
+        # SARDoppler0 : doppler0 estimation written into dop_file
+        app_doppler0 = OTBApplicationWrapper("SARDoppler0")
+        app_doppler0.set_input_images(insar=self.image_path())
+        app_doppler0.set_output_images(outfile=os.path.join(output_dir,
+                                                            dop_file))
+        app_doppler0.execute_app(in_memory=False)
+
+        # SARMultiLook : multilooked image from the complex input
+        # pylint: disable=no-member
+        ml_file = self.file_handler.get_filename(PreProcessingFilenamesOthers.ML.get_key())
+        # pylint: enable=no-member
+        ml_output = os.path.join(output_dir,
+                                 ml_file)
+        app_multilook = OTBApplicationWrapper("SARMultiLook")
+        app_multilook.set_parameters(mlran=ml_range, mlazi=ml_azimut,
+                                     mlgain=ml_gain)
+        app_multilook.set_input_images(incomplex=self.image_path())
+        app_multilook.set_output_images(out=ml_output)
+        app_multilook.execute_app(in_memory=False)
+
+        # Assign the outputs as list to be consistent with S1IW mode
+        self._outputs[str(PreProcessingOutputKeys.DOP0)] = [app_doppler0.get_output_float_parameter("doppler0")]
+        self._outputs[str(PreProcessingOutputKeys.ML)] = [ml_output]
+
+
+class ExecutorPreProcessingS1IW(ExecutorSingleImage):
+    """Execute processing for pre porcessing chain mode S1SM-CSK
+    """
+    def execute_one_burst(self, image, image_dir, output_dir,
+                          burst_id_in, burst_id_out):
+
+        # retrive parameters
+        ml_azimut = self.param_handler.get_param(str(PreProcessingParamOthers.MLAZI))
+        ml_range = self.param_handler.get_param(str(PreProcessingParamOthers.MLRAN))
+        ml_gain = self.param_handler.get_param(str(PreProcessingParamOthers.MLGAIN))
+        dop_file = self.param_handler.get_param(str(PreProcessingParamOthers.DOPFILE))
+
+        # Output directory for the current burst
+        burst_dir = os.path.join(output_dir, "burst" + str(burst_id_out))
+
+        # Intermediate names for the current burst
+        self.file_handler.create_intermediate_names(ml_azimut=ml_azimut,
+                                                    ml_range=ml_range,
+                                                    burst_id=burst_id_out)
+
+        # SARBurstExtraction
+        # pylint: disable=no-member
+        burst_file = self.file_handler.get_filename(PreProcessingFilenamesS1IW.BURSTEXTRACT.get_key())
+        # pylint: enable=no-member
+        burst_output_path = os.path.join(burst_dir,
+                                         burst_file)
+
+        print("burst_index !!!")
+        print(burst_id_in)
+
+        app_burst_extract = OTBApplicationWrapper("SARBurstExtraction")
+        app_burst_extract.set_parameters(burstindex=burst_id_in, allpixels="true")
+        app_burst_extract.set_input_images(in_=os.path.join(image_dir, image))
+        app_burst_extract.set_output_images(out=burst_output_path)
+        app_burst_extract.execute_app(in_memory=False)
+
+        # SARDeramp on extracted burst
+        # pylint: disable=no-member
+        deramp_file = self.file_handler.get_filename(PreProcessingFilenamesS1IW.DERAMP.get_key())
+        # pylint: enable=no-member
+        deramp_output_path = os.path.join(burst_dir,
+                                          deramp_file)
+
+        app_deramp = OTBApplicationWrapper("SARDeramp")
+        app_deramp.set_input_images(in_=burst_output_path)
+        app_deramp.set_output_images(out=deramp_output_path)
+        app_deramp.execute_app(in_memory=False)
+
+        # SARDoppler0 to extimate doppler 0 on deramped burst
+        app_doppler0 = OTBApplicationWrapper("SARDoppler0")
+        app_doppler0.set_input_images(insar=deramp_output_path)
+        app_doppler0.set_output_images(outfile=os.path.join(output_dir, dop_file))
+        app_doppler0.execute_app(in_memory=False)
+
+        # SARMultiLook on deramped burst
+        # pylint: disable=no-member
+        ml_file = self.file_handler.get_filename(PreProcessingFilenamesS1IW.ML.get_key())
+        # pylint: enable=no-member
+        ml_output_path = os.path.join(burst_dir,
+                                      ml_file)
+        app_multilook = OTBApplicationWrapper("SARMultiLook")
+        app_multilook.set_parameters(mlran=ml_range, mlazi=ml_azimut, mlgain=ml_gain)
+        app_multilook.set_input_images(incomplex=deramp_output_path)
+        app_multilook.set_output_images(out=ml_output_path)
+        app_multilook.execute_app(in_memory=False)
+
+        # Return outputs as tuple
+        return app_doppler0.get_output_float_parameter("doppler0"), \
+            burst_output_path, deramp_output_path, ml_output_path
+
+    def execute(self):
+        """PreProcessing chain for S1IW sensor
+
+        Four applications are called here : SARBurstExtraction, SARDeramp, SARDoppler0 and SARMultiLook
+        These applications processed each burst provided in param dictionary with burst_ids as key
+
+        NB : A particular case can be handle with a different burst_ids and burst_to_process.
+        The case rarely occurs and deals with a gap between reference/secondary burst match.
+        """
+        # retrieve input : image and output_dir
+        image_dir = self._inputs[str(RequiredKeysForSingleImageProcessing.DIR)]
+        image_in = self._inputs[str(RequiredKeysForSingleImageProcessing.IMAGE)]
+        output_dir = self._inputs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)]
+
+        # Retrieve the specific parameters :
+        # burst_ids : id of the burst
+        # burst_to_process : burst to extract and then to process
+        # (can be different for secondary image if burst ids betwwen reference and secondary image do not match)
+
+        burst_ids_list = self.param_handler.get_param(str(PreProcessingParamS1IW.BURSTIDS))
+        burst_ids_to_extract_list = self.param_handler.get_param(str(PreProcessingParamS1IW.BURSTEXTRACT))
+
+        # Create an empty lists
+        dop0_list = []
+        bursts_list = []
+        deramp_list = []
+        ml_list = []
+
+        # loop on burst
+        for id_loop in range(0, len(burst_ids_list)):
+            burst_id_in = burst_ids_to_extract_list[id_loop]
+            burst_id_out = burst_ids_list[id_loop]
+
+            # Process the current burst
+            dop0, bursts, deramp, ml = self.execute_one_burst(image_in,
+                                                              image_dir,
+                                                              output_dir,
+                                                              burst_id_in,
+                                                              burst_id_out)
+            dop0_list.append(dop0)
+            bursts_list.append(bursts)
+            deramp_list.append(deramp)
+            ml_list.append(ml)
+
+        # Assign outptus
+        self._outputs[str(PreProcessingOutputKeys.DOP0)] = dop0_list
+        self._outputs[str(PreProcessingOutputKeys.BURSTS)] = bursts_list
+        self._outputs[str(PreProcessingOutputKeys.DERAMP)] = deramp_list
+        self._outputs[str(PreProcessingOutputKeys.ML)] = ml_list
diff --git a/python_src/lib/processing/__init__.py b/python_src/lib/processing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python_src/lib/processing/core/ApplicationWrapper.py b/python_src/lib/processing/core/ApplicationWrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5613ebf326323f153eec4af69d0c1359212097e
--- /dev/null
+++ b/python_src/lib/processing/core/ApplicationWrapper.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Application wrapper to call OTB/DiapOTB applications
+"""
+
+import otbApplication as otb
+from .DiapOTBExceptions import DiapOTBException
+
+
+class OTBApplicationWrapper():
+    """ Handler to call OTB/DiapOTB applications
+    """
+    # Static argument to define a default ram parameter
+    RAM = 2000
+
+    def __init__(self, app_name):
+        # Create the required OTB application
+        self._app = otb.Registry.CreateApplication(app_name)
+        if self._app is None:
+            raise DiapOTBException("Application Name does not match with any OTB/DiapOTB application : " +
+                                   str(otb.Registry.GetAvailableApplications()) +
+                                   ". Please choose an existing application")
+
+        # Initialize parameters, inputs and outputs
+        self._parameters = {}
+        self._inputs = {}
+        self._outputs = {}
+
+        self._available_params = self._app.GetParametersKeys()
+
+    def __del__(self):
+        """Remove the current application
+        """
+        self._app = None
+
+    def set_parameters(self, **kwargs):
+        """Set parameters (only integer/float/bool parameters)
+        """
+        # For each parameters
+        for param_name, param_value in kwargs.items():
+            # Check if the current parameters is available for the current application
+            if param_name not in self._available_params:
+                raise DiapOTBException(param_name + "is not available for " + self._app.GetName())
+
+            # Call the associated functions to set the current parameter
+            try:
+                if isinstance(param_value, int):
+                    self._app.SetParameterInt(param_name, param_value)
+                elif isinstance(param_value, float):
+                    self._app.SetParameterFloat(param_name, param_value)
+                elif isinstance(param_value, bool):
+                    self._app.SetParameterBool(param_name, param_value)
+                else:
+                    self._app.SetParameterString(param_name, param_value)
+            except:
+                raise DiapOTBException("Unexpected type for " + param_name)
+
+    def set_input_images(self, **kwargs):
+        """Set inputs (only string or image)
+        """
+        # For each inputs
+        for input_name, input_value in kwargs.items():
+            # Check if the current input is available for the current application
+            # (specific case for in_ to avoid the python keyword in)
+            if input_name not in self._available_params and input_name != "in_":
+                raise DiapOTBException(input_name + " is not available for " + self._app.GetName())
+
+            # Call the associated functions to set the current parameter, only two choices :
+            # files => str
+            # image => input_image
+            try:
+                if input_name == "in_":
+                    input_name = "in"
+                if isinstance(input_value, str):
+                    self._app.SetParameterString(input_name, input_value)
+                elif isinstance(input_value, list):
+                    self._app.SetParameterStringList(input_name, input_value)
+                else:
+                    self._app.SetParameterInputImage(input_name, input_value)
+            except:
+                raise DiapOTBException("Unexpected type for " + input_name)
+
+    def set_output_images(self, **kwargs):
+        """Set outputs (only string = files)
+        """
+        # For each inputs
+        for output_name, output_value in kwargs.items():
+            # Check if the current input is available for the current application
+            if output_name not in self._available_params:
+                raise DiapOTBException(output_name + "is not available for " + self._app.GetName())
+
+            # Call the associated functions to set the current parameter, only one choice :
+            # files => str
+            try:
+                if isinstance(output_value, str):
+                    self._app.SetParameterString(output_name, output_value)
+                else:
+                    raise DiapOTBException("Unexpected type for " + output_name)
+            except:
+                raise DiapOTBException("Unexpected type for " + output_name)
+
+    def get_output_image(self, out_name):
+        """Getter on output image
+        """
+        out_img = None
+        try:
+            out_img = self._app.GetParameterOutputImage(out_name)
+        except:
+            raise DiapOTBException("A problem occured getting the required output image")
+
+        return out_img
+
+    def get_output_float_parameter(self, out_name):
+        """Getter on an output parameter
+        """
+        out_param = 0.
+        try:
+            out_param = self._app.GetParameterFloat(out_name)
+        except:
+            raise DiapOTBException("A problem occured getting the required output parameter")
+
+        return out_param
+
+    def get_output_int_parameter(self, out_name):
+        """Getter on an output parameter
+        """
+        out_param = 0.
+        try:
+            out_param = self._app.GetParameterInt(out_name)
+        except:
+            raise DiapOTBException("A problem occured getting the required output parameter")
+
+        return out_param
+
+    def execute_app(self, in_memory=False):
+        """Execute OTB/DiapOTB application (in-memory or not)
+        """
+        if not self._app.IsApplicationReady():
+            raise DiapOTBException("Some mandatory arguments are missing for " + self._app.GetName() + \
+                                   " . Only the following arguments were provided :  " + str(self._app.GetParameters()))
+        if "ram" not in self._app.GetParameters():
+            print(self._app.GetParameters())
+            self._app.SetParameterString("ram", str(self.RAM))
+
+        if in_memory:
+            self._app.Execute()
+        else:
+            self._app.ExecuteAndWriteOutput()
diff --git a/python_src/lib/processing/core/ConfigFile.py b/python_src/lib/processing/core/ConfigFile.py
new file mode 100644
index 0000000000000000000000000000000000000000..1913bd191218fe2356b9fcd86be89bbba8564015
--- /dev/null
+++ b/python_src/lib/processing/core/ConfigFile.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+    ConfigFile
+    ==========
+
+    Handle, check the input configuration file
+
+"""
+import os
+import json
+from jsonschema import validate
+
+from .DiapOTBEnums import ExtendedEnum, ScriptNames, ChainNames
+from .DiapOTBExceptions import DiapOTBException
+from .Utils import logger, check_if_exist, check_if_dir
+
+# pylint: disable=too-few-public-methods
+# Specific enums
+
+class MainTag(ExtendedEnum):
+    """Top-level sections of the json configuration file
+    """
+    GLOBAL = "Global"
+    PREPROCESSING = "Pre_Processing"
+    DINSAR = "DIn_SAR"
+    POSTPROCESSING = "Post_Processing"
+
+
+class MiddleTag(ExtendedEnum):
+    """Second-level tags inside each main section of the json file
+    """
+    INPUT = "in"
+    OUTPUT = "out"
+    PARAMETER = "parameter"
+    SENSOR = "sensor"
+
+
+class ConfigParamPattern(ExtendedEnum):
+    """Keys found in the json configuration file for processing parameters
+    """
+    MLRAN = "ML_range"
+    MLAZI = "ML_azimut"
+    MLGAIN = "ML_gain"
+    DOPFILE = "doppler_file"
+    GRIDSTEPRAN = "GridStep_range"
+    GRIDSTEPAZI = "GridStep_azimut"
+    GRIDTHES = "Grid_Threshold"
+    GRIDGAP = "Grid_Gap"
+    INTERFGAIN = "Interferogram_gain"
+    INTERFMLRAN = "Interferogram_mlran"
+    INTERFMLAZI = "Interferogram_mlazi"
+    WITHORTHO = "Activate_Ortho"
+    SPACINGXY = "Spacingxy"
+    WITHFILT = "Activate_Filtering"
+    FILTMLRAN = "Filtered_Interferogram_mlran"
+    FILTMLAZI = "Filtered_Interferogram_mlazi"
+    FILTALPHA = "Filtered_parameter_alpha"
+    ESDITER = "ESD_iter"
+    BURSTINDEX = "burst_index"
+
+
+class ConfigInputPattern(ExtendedEnum):
+    """Keys found in the json configuration file for global inputs/outputs
+    """
+    MASTER = "Master_Image_Path"
+    SLAVE = "Slave_Image_Path"
+    DEM = "DEM_Path"
+    EOF = "EOF_Path"
+    OUTPUTDIR = "output_dir"
+
+
+# pylint: enable=too-few-public-methods
+
+class ConfigFile():
+    """Class to handle configuration file and prepare inputs/parameters for processing
+    """
+    def __init__(self, config_file, script_name):
+        self._config_file = config_file
+
+        if script_name not in ScriptNames.list():
+            print("Pouet")
+        self._script_name = script_name
+        self._data_config = {}
+
+    def _validate_with_schema(self, schema):
+        """
+           Compare input with JSON schame the configuration file
+        """
+        try:
+            validate(self._data_config, schema)
+        except Exception as valid_err:
+            print("Invalid JSON: {}".format(valid_err))
+            return False
+        else:
+            # Realise votre travail
+            print("Valid JSON")
+            return True
+
+    def _validate_json(self):
+        """Check json content following a schema
+        """
+        # Retrieve the schame following the current script :
+        # diapotb.py or diapot_S1IW.py or SARMulti_SLC*S1IW.py
+        schema_json = "schema_S1SM.json"
+        if self._script_name == str(ScriptNames.SIMPLE_S1IW):
+            schema_json = "schema_S1IW.json"
+        elif self._script_name == str(ScriptNames.MULTI_SLC_S1SM):
+            schema_json = "schema_MultiSlc.json"
+        elif self._script_name == str(ScriptNames.MULTI_SLC_S1IW):
+            schema_json = "schema_MultiSlc_IW.json"
+
+        # Load schema (relative path)
+        current_dir = os.path.dirname(os.path.abspath(__file__))
+        schema_path = os.path.join(current_dir, "../../../share/json_schemas/")
+
+        print(schema_path)
+
+        if os.path.exists(schema_path):
+            schema_file = os.path.join(schema_path, schema_json)
+
+            try:
+                with open(schema_file, "r") as sch:
+                    data_schema = json.load(sch)
+            except Exception as err:
+                logger.critical("Impossible to read or load JSON configuration schema : {err}. Check its path and content."\
+                                .format(err=self._config_file))
+                quit()
+
+            # Check Json file
+            json_is_valid = self._validate_with_schema(data_schema)
+
+            if not json_is_valid:
+                logger.critical("Error, the input config file does not fulfill requirements")
+                quit()
+
+    def load_configfile(self):
+        """
+        Read and Load the configuration file (check this file according to a schmema)
+        """
+        # Read and Load the configuration file
+        try:
+            with open(self._config_file, 'r') as f:
+                self._data_config = json.load(f)
+
+        except Exception as err:
+            logger.critical("Impossible to read or load JSON configuration file : {err}. Check its path and content."\
+                            .format(err=self._config_file))
+            quit()
+
+        self._validate_json()
+
+
+    def create_param_dict_from_config_file(self, param_enum, chain_name):
+        """ Build a parameter dictionary for a processing chain from configuratio file
+        """
+        # Checks argument
+        if not issubclass(param_enum, ExtendedEnum):
+            logger.critical("Wrong input enum to build the parameter dictionary")
+
+        if chain_name not in ChainNames.list():
+            logger.critical("Wrong input enum to build the parameter dictionary")
+
+        # empty dict
+        param_dict = {}
+
+        # Try to retrieve all parameters from the input configuration file
+        for param in param_enum:
+            if str(param) in ConfigParamPattern.list():
+                # first try on the chainName part
+                try:
+                    param_dict[str(param)] = self._data_config[chain_name][str(MiddleTag.PARAMETER)][str(param)]
+                except KeyError:
+                    # Additionnal tries on others chains
+                    for config_part in MainTag:
+                        if str(config_part) != chain_name:
+                            try:
+                                param_dict[str(param)] = self._data_config[str(config_part)][str(MiddleTag.PARAMETER)][str(param)]
+                            except KeyError:
+                                continue
+
+        return param_dict
+
+
+    def get_reference_image(self):
+        """Getter on reference image
+        """
+        reference_image = self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.INPUT)]["Master_Image_Path"]
+        if not check_if_exist(reference_image):
+            raise DiapOTBException("Reference image : %s does not exist" %reference_image)
+
+        return reference_image
+
+    def get_secondary_image(self):
+        """Getter on secondary image
+        """
+        secondary_image = self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.INPUT)]["Slave_Image_Path"]
+        if not check_if_exist(secondary_image):
+            raise DiapOTBException("Secondary image : %s does not exist" %secondary_image)
+
+        return secondary_image
+
+    def get_dem(self):
+        """Getter on dem
+        """
+        dem = self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.INPUT)]["DEM_Path"]
+        if not check_if_exist(dem):
+            raise DiapOTBException("dem : %s does not exist" %dem)
+
+        return dem
+
+    def get_eof(self):
+        """Getter on EOF Path
+        """
+        eof_path = ""
+        try:
+            eof_path = self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.INPUT)]["EOF_Path"]
+
+            if not check_if_dir(eof_path):
+                raise DiapOTBException("EOF_PATH : %s does not exist" %eof_path)
+
+        except KeyError:
+            print("EOF PATH does not exist for this configuration file")
+
+        return eof_path
+
+    def get_output_dir(self):
+        """Getter on output directory
+        """
+        return self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.OUTPUT)]["output_dir"]
+
+    def get_doppler_file(self):
+        """Getter on doppler file name
+        """
+        return self._data_config[str(MainTag.PREPROCESSING)][str(MiddleTag.OUTPUT)]["doppler_file"]
+
+
+    def get_burst_list(self):
+        """Getter on burst list (from configuration file)
+        """
+        # Default 0 to 8 (usally 9 bursts in S1IW image)
+        burst_list = [int(i) for i in range(0, 9)]
+        try:
+            burst_index = self._data_config[str(MainTag.GLOBAL)][str(MiddleTag.PARAMETER)]["burst_index"]
+            burst_list = [int(i) for i in burst_index.split("-")]
+
+        except KeyError:
+            print("burst_index does not exist for this configuration file")
+
+        # Check burst id
+        if min(burst_list) < 0 or max(burst_list) > 8:
+            raise DiapOTBException("burst index are not consistent for S1IW processing")
+
+
+        return burst_list
diff --git a/python_src/lib/processing/core/DiapOTBEnums.py b/python_src/lib/processing/core/DiapOTBEnums.py
new file mode 100644
index 0000000000000000000000000000000000000000..66f171ca0e1a1a16cfcedba6aca385252b60f108
--- /dev/null
+++ b/python_src/lib/processing/core/DiapOTBEnums.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Enums definitions for DiapOTB processings
+"""
+
+from enum import Enum
+
+
+# pylint: disable=too-few-public-methods
+def extend_enum(inherited_enum):
+    """Decorator to extend enum
+    """
+    def wrapper(added_enum):
+        joined = {}
+        for item in inherited_enum:
+            joined[item.name] = item.value
+        for item in added_enum:
+            joined[item.name] = item.value
+        return ExtendedEnum(added_enum.__name__, joined)
+    return wrapper
+
+
+class ExtendedEnum(Enum):
+    """Base class for DiapOTB Enums (used mainly to print available values)
+    """
+
+    @classmethod
+    def list(cls):
+        """Transform enum values to a list
+        """
+        return list(map(lambda c: c.value, cls))
+
+    @classmethod
+    def list_lower(cls):
+        """Transform enum values to a list
+        """
+        return list(map(lambda c: c.value.lower(), cls))
+
+    def __str__(self):
+        """Define a str functions for basic enums (with single strings as values)
+        """
+        return self.value
+
+class DefaultEnum(Enum):
+    """Base class to specify some default value for a few parameters
+    """
+    @classmethod
+    def list(cls):
+        """Transform enum values to a list
+        """
+        return list(map(lambda c: c.value[0], cls))
+
+    def bind_with_other_key(self, enum):
+        """Know if current Instance is bind with other key
+        """
+        is_bind = False
+        if self.value[1] in enum.list():
+            is_bind = True
+
+        return is_bind
+
+    def get_default_value_or_key(self):
+        """Get default value or the bind keys
+        """
+        return self.value[1]
+
+    def __str__(self):
+        """Get key
+        """
+        return self.value[0]
+    
+
+class RequiredKeysForSingleImageProcessing(ExtendedEnum):
+    """Required input keys to build a processing working on a single image
+    """
+    IMAGE = "image"
+    DIR = "image_dir"
+    PARAMETERS = "param"
+    OUTPUT_DIR = "output_dir"
+
+
+class RequiredKeysForDualImagesProcessing(ExtendedEnum):
+    """Required input keys to build a processing working on an image couple
+    """
+    REFERENCE_IMAGE = "reference_image"
+    SECONDARY_IMAGE = "secondary_image"
+    REFERENCE_DIR = "reference_dir"
+    SECONDARY_DIR = "secondary_dir"
+    PARAMETERS = "param"
+    OUTPUT_DIR = "output_dir"
+
+
+class Sensor(ExtendedEnum):
+    """Define all available sensors
+    """
+    # NOTE(review): "StriMap" looks like a typo for "StripMap", but the value is
+    # a runtime string that may be compared elsewhere — confirm before changing.
+    S1SM = "Sentinel-1 StriMap"
+    S1IW = "Sentinel-1 IW"
+    CSK = "Cosmo"
+
+
+class Satellite(ExtendedEnum):
+    """Define all available satellites (platform identifiers)
+    """
+    S1A = "SENTINEL-1A"
+    S1B = "SENTINEL-1B"
+    CSK = "CSK"
+    TSX = "TSX-1"
+    PAZ = "PAZ-1"
+
+class ScriptNames(ExtendedEnum):
+    """Define the script names (represent the global processing by calling the main chains)
+    """
+    SIMPLE_S1SM = "diapotb"
+    SIMPLE_S1IW = "diapotb_S1IW"
+    MULTI_SLC_S1SM = "SARMulti_SLC"
+    MULTI_SLC_S1IW = "SARMulti_SLC_S1IW"
+
+
+class ChainNames(ExtendedEnum):
+    """Every chain name for DiapOTB (also the section names of the json file)
+    """
+    PRE_PROCESSING = "Pre_Processing"
+    GROUND = "Ground"
+    DINSAR = "DIn_SAR"
+    POST_PROCESSING = "Post_Processing"
+
+class ChainModes(ExtendedEnum):
+    """Class to specify every chain available mode in all chains.
+    Mainly related to sensor, for instance S1SM or S1IW.
+    """
+    S1_IW = "S1IW"
+    OTHERS = "Others"
+
+
+class ExtPosition(ExtendedEnum):
+    """Define the available positions for an extension in a filename
+    """
+    # NOTE(review): "sufix"/"exclusif" spellings are runtime values used by
+    # other modules — kept as-is on purpose.
+    SUFIX = "sufix"
+    PREFIX = "prefix"
+    WHOLE = "whole"
+    EXCLUSIF = "exclusif"
+
+
+class FilenamesEnum(Enum):
+    """
+    Two str to speficy, a key and an extension to create the file
+    The extension could be set at the beginning (prefix), at the end (sufix) or
+    replace the current name (whole)
+    """
+    def __init__(self, key, ext, pos="sufix"):
+        self.key = key
+        self.ext = ext
+        self.pos = pos
+
+    def get_key(self):
+        """ Getter on key
+        """
+        return self.value[0]
+
+    def get_extension(self):
+        """ Getter on extension to add
+        """
+        return self.value[1]
+
+    def get_position(self):
+        """ Getter on position (where to set the given extension before/after the name or replace it)
+        """
+        try:
+            position = self.value[2]
+
+            if position not in ExtPosition.list():
+                position = "sufix"
+        except:
+            position = "sufix"
+        return position
+
+# pylint: enable=too-few-public-methods
diff --git a/python_src/lib/processing/core/DiapOTBExceptions.py b/python_src/lib/processing/core/DiapOTBExceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..18f58cd7cb6eefab4e09772b0b8c5671478a5f46
--- /dev/null
+++ b/python_src/lib/processing/core/DiapOTBExceptions.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Error definition for DiapOTB processings
+"""
+
class DiapOTBException(Exception):
    """Base class for errors in DiapOTB processings.
    """

    def __init__(self, value):
        super().__init__(value)
        # Keep the raw value so callers can inspect it directly
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
diff --git a/python_src/lib/processing/core/DiapOTBProcessing.py b/python_src/lib/processing/core/DiapOTBProcessing.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a86e9b25f2cff0491dad3d809bf54be27e61e5b
--- /dev/null
+++ b/python_src/lib/processing/core/DiapOTBProcessing.py
@@ -0,0 +1,555 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+
+"""
+Base classes to define and create a processing
+"""
+
+
+from abc import ABCMeta, abstractmethod
+import os
+import inspect
+
+from .DiapOTBEnums import RequiredKeysForSingleImageProcessing, \
+    RequiredKeysForDualImagesProcessing, \
+    ChainModes, FilenamesEnum, ExtPosition, DefaultEnum
+from .DiapOTBExceptions import DiapOTBException
+
+from .Utils import adapt_image_format
+
+
class DiapOTBProcessing(metaclass=ABCMeta):
    """Abstract/Base class to define standard processing.
    """

    def __init__(self, **kwargs):
        # Name of the concrete processing (overridden by subclasses)
        self._name = "Base Processing"

        # Default mode
        self._mode = ChainModes.OTHERS

        # Empty containers for applications, inputs and outputs
        self._applications = []
        self._dict_outputs = {}
        self._dict_inputs = {}

        # Handlers and Executor as composition (bridge)
        # To check parameters and execute applications following the mode and chain
        self.param_handler = None
        self.file_handler = None

        self._inputs_list = []

        self._executor_builder = ExecutorBuilder()
        self._input_enum = None

    def append_inputs(self, dict_to_add):
        """Append the key/value from dict_to_add into dict_inputs.
        Only the keys listed in the inputs list are kept.
        """
        selected = {key: value for key, value in dict_to_add.items()
                    if key in self._inputs_list}
        self._dict_inputs.update(selected)

    @abstractmethod
    def retrieve_output(self, key):
        """Retrieve a specific output with a key.
        """

    def get_outputs(self):
        """Retrieve the output dictionary (with all outputs).
        """
        return self._dict_outputs

    @abstractmethod
    def execute(self, **kwargs):
        """Execute the current processing according a given mode (sensor).
        """

    def info_chain(self):
        """Print all information about the current processing chain.
        """
+
+
class DiapOTBProcessingSingleImage(DiapOTBProcessing, metaclass=ABCMeta):
    """Abstract/Base class to define a standard processing applied to a single image.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Check then assign global arguments : output_dir, image ...
        self._check_required_elts(**kwargs)

        self._image = kwargs[str(RequiredKeysForSingleImageProcessing.IMAGE)]
        self._image_dir = kwargs[str(RequiredKeysForSingleImageProcessing.DIR)]
        self._output_dir = kwargs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)]
        self._param = kwargs[str(RequiredKeysForSingleImageProcessing.PARAMETERS)]

        print("self._image_dir : " + self._image_dir)
        print("self._image : " + self._image)

        # Get the image base (split on "?" to handle extended filenames)
        self._image_base = os.path.splitext(os.path.basename(self._image.split("?")[0]))[0]
        ext = os.path.splitext(os.path.basename(self._image.split("?")[0]))[1]
        print(self._image_base + ext)

        # First of all, check if image and directories exist
        if not os.path.exists(self._image_dir) or \
           not os.path.exists(os.path.join(self._image_dir, self._image_base + ext)) or \
           not os.path.exists(self._output_dir):
            raise DiapOTBException("Input image or one of directories do not exist for " + self._name +
                                   ". Please, check the given paths")

    def _check_required_elts(self, **kwargs):
        """Check if all required elts were provided.

        :raises DiapOTBException: when at least one required key is missing
        """
        # Required elements
        check_required_elts = all(elt in kwargs
                                  for elt in RequiredKeysForSingleImageProcessing.list())

        if not check_required_elts:
            # str(list(...)) : concatenating a dict_keys view to a str raises TypeError
            raise DiapOTBException("Missing some required inputs to instantiate the current processing : " +
                                   self._name + ". Only : " + str(list(kwargs.keys())) + " were provided")

    def execute(self, **kwargs):
        """Check inputs, build the mode-specific executor and run it.
        """
        # Gather self._dict_inputs with kwargs
        self._dict_inputs.update(kwargs)

        # Check arguments against the expected input enum (if any)
        if self._input_enum is not None:
            check_required_elts = all(elt in self._dict_inputs
                                      for elt in self._input_enum.list())

            if not check_required_elts:
                raise DiapOTBException("Missing some required inputs to execute the current processing : " +
                                       self._name + ". Only : " + str(self._dict_inputs) + " were provided")

        # Build the executor
        self._executor_builder.add_element("param", self.param_handler)\
                              .add_element("filename", self.file_handler)
        executor = self._executor_builder.build(self._mode)

        # Add inputs
        executor.inputs = self._dict_inputs
        executor.inputs[str(RequiredKeysForSingleImageProcessing.IMAGE)] = self._image
        executor.inputs[str(RequiredKeysForSingleImageProcessing.DIR)] = self._image_dir
        executor.inputs[str(RequiredKeysForSingleImageProcessing.OUTPUT_DIR)] = self._output_dir

        # Execute the processing
        executor.execute()

        # Get outputs
        self._dict_outputs = executor.outputs
+
+
class DiapOTBProcessingDualImages(DiapOTBProcessing, metaclass=ABCMeta):
    """Abstract/Base class to define standard processing with two images (ie : DIn-SAR).
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Check then assign global arguments : output_dir, reference_image ...
        self._check_required_elts(**kwargs)

        self._reference_image = kwargs[str(RequiredKeysForDualImagesProcessing.REFERENCE_IMAGE)]
        self._secondary_image = kwargs[str(RequiredKeysForDualImagesProcessing.SECONDARY_IMAGE)]
        self._reference_dir = kwargs[str(RequiredKeysForDualImagesProcessing.REFERENCE_DIR)]
        self._secondary_dir = kwargs[str(RequiredKeysForDualImagesProcessing.SECONDARY_DIR)]
        self._output_dir = kwargs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)]
        self._param = kwargs[str(RequiredKeysForDualImagesProcessing.PARAMETERS)]

        # Get image base for reference and secondary (split on "?" to handle extended filenames)
        self._reference_base = os.path.splitext(os.path.basename(self._reference_image.split("?")[0]))[0]
        self._secondary_base = os.path.splitext(os.path.basename(self._secondary_image.split("?")[0]))[0]

        # Two file handlers : for reference and secondary images
        self.file_handler_reference = None
        self.file_handler_secondary = None

    def _check_required_elts(self, **kwargs):
        """Check if all required elts were provided.

        :raises DiapOTBException: when at least one required key is missing
        """
        # Required elements
        check_required_elts = all(elt in kwargs
                                  for elt in RequiredKeysForDualImagesProcessing.list())

        if not check_required_elts:
            # str(list(...)) : concatenating a dict_keys view to a str raises TypeError
            raise DiapOTBException("Missing some required inputs to instantiate the current processing : " +
                                   self._name + ". Only : " + str(list(kwargs.keys())) + " were provided")

        return check_required_elts

    def append_inputs_reference(self, dict_to_add):
        """Append the key/value from dict_to_add into dict_inputs for reference image.
        Add only those needed (specified in inputs list).
        """
        for in_key, in_elt in dict_to_add.items():
            # Change in_key to tag it as reference
            in_key += "_reference"
            if in_key in self._inputs_list:
                self._dict_inputs[in_key] = in_elt

    def append_inputs_secondary(self, dict_to_add):
        """Append the key/value from dict_to_add into dict_inputs for secondary image.
        Add only those needed (specified in inputs list).
        """
        for in_key, in_elt in dict_to_add.items():
            # Change in_key to tag it as secondary
            in_key += "_secondary"
            if in_key in self._inputs_list:
                self._dict_inputs[in_key] = in_elt

    def execute(self, **kwargs):
        """Check inputs, build the mode-specific executor and run it on both images.
        """
        # Gather self._dict_inputs with kwargs
        self._dict_inputs.update(kwargs)

        # Check arguments against the expected input enum
        check_required_elts = all(elt in self._dict_inputs
                                  for elt in self._input_enum.list())

        if not check_required_elts:
            raise DiapOTBException("Missing some required inputs to execute the current processing : " +
                                   self._name + ". Only : " + str(self._dict_inputs) + " were provided")

        # Build the executor
        self._executor_builder.add_element("param", self.param_handler).\
            add_element("filename_reference", self.file_handler_reference).\
            add_element("filename_secondary", self.file_handler_secondary)
        executor = self._executor_builder.build(self._mode)

        # Add inputs
        executor.inputs = self._dict_inputs
        executor.inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_IMAGE)] = self._reference_image
        executor.inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_DIR)] = self._reference_dir
        executor.inputs[str(RequiredKeysForDualImagesProcessing.SECONDARY_IMAGE)] = self._secondary_image
        executor.inputs[str(RequiredKeysForDualImagesProcessing.SECONDARY_DIR)] = self._secondary_dir
        executor.inputs[str(RequiredKeysForDualImagesProcessing.OUTPUT_DIR)] = self._output_dir

        # Execute the processing
        executor.execute()

        # Get outputs
        self._dict_outputs = executor.outputs
+
+
# Additional classes to handle application execution, parameters and filenames
class ParamHandler():
    """Handle processing parameters.

    Checks, against an Enum used as template, that every required
    parameter was provided, and fills missing ones with default values.

    :param enum_class: Enum class listing the required parameter keys
    :param param: dict of parameter name -> value
    :param enum_default: optional DefaultEnum subclass providing defaults
    :raises DiapOTBException: when argument types are wrong
    """
    def __init__(self, enum_class, param, enum_default=None):
        # Instanciate arguments
        self._enum_param = enum_class
        self._param = param

        self._enum_default = enum_default
        if enum_default is not None:
            if not issubclass(enum_default, DefaultEnum):
                raise DiapOTBException("Wrong parameters for ParamHandler, must be an Enum class")

        # Check the arguments types : enum and dict
        if not inspect.isclass(self._enum_param):
            raise DiapOTBException("Wrong parameters for ParamHandler, must be an Enum class")

        if not isinstance(self._param, dict):
            raise DiapOTBException("Wrong parameters type, must be a dict")

    def check_param(self):
        """Check if all required paramaters are provided thanks to input enum as template.

        Missing parameters are first filled from enum_default: either a
        literal default value or, when the default member is bound to
        another key, the value already stored under that key (presumably —
        bind_with_other_key semantics come from the project enum classes;
        TODO confirm).

        :raises DiapOTBException: when a required parameter is still missing
        """
        # Put some value default if some paramaters were not set
        if self._enum_default is not None:
            for elt in self._enum_default:
                # Check if elt is already in param, if not put the default value
                if str(elt) not in self._param:
                    if elt.bind_with_other_key(self._enum_param):
                        self._param[str(elt)] = self._param[elt.get_default_value_or_key()]
                    else:
                        self._param[str(elt)] = elt.get_default_value_or_key()

        print(self._param)
        print(self._enum_param.list())

        # Check parameters
        check_required_param = all(elt in self._param
                                   for elt in self._enum_param.list())

        if not check_required_param:
            # NOTE(review): adjacent string literals — a name was probably
            # meant to appear between "for : " and ". Please"; confirm intent
            raise DiapOTBException("Check param failed for : "
                                   ". Please, check the input parameters")

        print(self._param)

        return check_required_param

    def get_param(self, key):
        """Getter on param.

        :param key: parameter name to look up
        :raises DiapOTBException: when key is absent from the param dict
        """
        param = False
        if key in self._param:
            param = self._param[key]
        else:
            raise DiapOTBException("param dictionary does not contain : " + key)

        return param
+
+
class FilenamesHandler():
    """Base class to handle filenames (intermediate/output files).

    :param enum_class: FilenamesEnum subclass describing the files to create
    :param image_base: image basename used to derive the filenames
    :param mode: ChainModes value (falls back to OTHERS when unknown)
    :raises DiapOTBException: when enum_class is not a FilenamesEnum subclass
    """
    def __init__(self, enum_class, image_base, mode):
        # Instanciate arguments
        self._enum_files = enum_class
        self._mode = ChainModes.OTHERS
        # Check mode (keep the OTHERS default for unknown modes)
        if isinstance(mode, ChainModes) or str(mode) in ChainModes.list():
            self._mode = mode
        self._image = image_base

        # Empty filenames
        self._filenames = {}

        # Check the arguments types : enum
        if not issubclass(self._enum_files, FilenamesEnum):
            raise DiapOTBException("Wrong parameters for FilenamesHandler, must be an Enum class")

    def create_intermediate_names(self, **kwargs):
        """Create all intermediate names used in the current processing from reference/secondary info.

        Optional kwargs : burst_id (used in S1IW mode, default 1),
        ml_azimut and ml_range (multilook factors appended to "ml"
        extensions, default 3).
        """
        # TODO : Bind names with utils functions :
        # get_slcml_namming_from_productname and get_interfnamming_from_productname

        # Fix the image_base according to the mode
        # image keeps the same except for S1IW with _burst<burst_id>
        image = self._image
        burst_id = kwargs.get("burst_id", 1)
        if str(self._mode) == str(ChainModes.S1_IW):
            image = self._image + "_burst" + str(burst_id)

        # Loop on Enums to create filenames
        for filename in self._enum_files:
            ext = filename.get_extension()
            # Specific case for ml files (get ml factors)
            if "ml" in filename.get_extension():
                ml_azi = kwargs.get("ml_azimut", 3)
                ml_ran = kwargs.get("ml_range", 3)

                ext += str(ml_azi) + str(ml_ran)

            # Get position to set the given extension (sufix/prefix or replace the current name)
            if filename.get_position() in [str(ExtPosition.WHOLE), str(ExtPosition.EXCLUSIF)]:
                if str(self._mode) == str(ChainModes.S1_IW) and \
                   filename.get_position() in [str(ExtPosition.WHOLE)]:
                    # str(burst_id) : burst_id may be an int, and
                    # str + int concatenation raises TypeError
                    ext += "_burst" + str(burst_id)
                # Replace the name by the extension
                current_file = ext + ".tif"
            elif filename.get_position() == str(ExtPosition.PREFIX):
                current_file = ext + image + ".tif"
            else:
                current_file = image + ext + ".tif"

            # Assign dictionary
            self._filenames[str(filename.get_key())] = current_file

        print(self._filenames)

    def get_filename(self, key):
        """Getter on filename.

        :raises DiapOTBException: when key is absent from the filenames dict
        """
        filename = ""
        if key in self._filenames:
            filename = self._filenames[key]
        else:
            raise DiapOTBException("filename dictionary does not contain : " + str(key))

        return filename

    def add_extension_to_filename(self, key, ext):
        """Add an extension to a given filename (key).

        :raises DiapOTBException: when key is absent from the filenames dict
        """
        if key in self._filenames:
            # Split the current filename to avoid .tif extension and add the given ext
            self._filenames[key] = self._filenames[key].split(".tif")[0] + ext + ".tif"
        else:
            raise DiapOTBException("filename dictionary does not contain : " + str(key))

    @property
    def filenames(self):
        """Getter on all filenames.
        """
        return self._filenames
+
+
class Executor(metaclass=ABCMeta):
    """Base class to run applications and retrieve outputs.
    """
    def __init__(self, param_handler, file_handler):
        # Composition of handlers (bridge pattern)
        self.param_handler = param_handler
        self.file_handler = file_handler

        # Empty inputs/outputs
        self._inputs = {}
        self._outputs = {}

        # Empty list of applications (attribute name kept as-is,
        # including its original spelling, for compatibility)
        self._appplications = []

    @abstractmethod
    def check_inputs(self):
        """Check that every required input was provided.
        """

    @abstractmethod
    def execute(self):
        """Run the applications of the current processing.
        """

    @property
    def outputs(self):
        """Getter on the outputs dictionary.
        """
        return self._outputs

    @property
    def inputs(self):
        """Getter on the inputs dictionary.
        """
        return self._inputs

    @inputs.setter
    def inputs(self, input_dict):
        self._inputs = input_dict
+
+
class ExecutorSingleImage(Executor, metaclass=ABCMeta):
    """Base class to run applications and retrieve outputs on a single image.
    """
    def image_path(self):
        """Gather and adapt format to get image path.
        """
        current_inputs = self._inputs
        image_dir = current_inputs[str(RequiredKeysForSingleImageProcessing.DIR)]
        image_in = current_inputs[str(RequiredKeysForSingleImageProcessing.IMAGE)]

        # adapt_image_format to handle h5 dataset (CSK sensor)
        return adapt_image_format(os.path.join(image_dir, image_in))

    def check_inputs(self, **kwargs):
        """Check global inputs to execute a single image.
        """
        # Loop on required inputs (the parameters key is excluded)
        param_key = str(RequiredKeysForSingleImageProcessing.PARAMETERS)
        missing = [elt for elt in RequiredKeysForSingleImageProcessing.list()
                   if elt not in kwargs and elt != param_key]

        if missing:
            raise DiapOTBException("Missing some required inputs to instantiate the current executor")
+
+
class ExecutorDualImages(Executor, metaclass=ABCMeta):
    """Base class to run applications and retrieve outputs on a couple of images.
    """
    def __init__(self, param_handler, file_handler_reference,
                 file_handler_secondary):
        # Base class keeps a single file_handler slot; unused here (None)
        super().__init__(param_handler, None)
        self.file_handler_reference = file_handler_reference
        self.file_handler_secondary = file_handler_secondary

    def reference_path(self):
        """Gather and adapt format to get reference image path.
        """
        reference = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_IMAGE)]
        reference_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.REFERENCE_DIR)]

        # adapt_image_format to handle h5 dataset (CSK sensor)
        return adapt_image_format(os.path.join(reference_dir, reference))

    def secondary_path(self):
        """Gather and adapt format to get secondary image path.
        """
        secondary = self._inputs[str(RequiredKeysForDualImagesProcessing.SECONDARY_IMAGE)]
        secondary_dir = self._inputs[str(RequiredKeysForDualImagesProcessing.SECONDARY_DIR)]

        # adapt_image_format to handle h5 dataset (CSK sensor)
        return adapt_image_format(os.path.join(secondary_dir, secondary))

    def check_inputs(self, **kwargs):
        """Check global inputs to execute two images.
        """
        # Loop on required inputs (exclude parameters)
        # Dual-images enum used for the PARAMETERS exclusion (was the
        # single-image enum — copy-paste)
        check_required_elts = all(elt in kwargs or elt == str(RequiredKeysForDualImagesProcessing.PARAMETERS)
                                  for elt in RequiredKeysForDualImagesProcessing.list())

        if not check_required_elts:
            raise DiapOTBException("Missing some required inputs to instantiate the current executor")
+
+
class ExecutorBuilder():
    """Builder of Executors.

    Executor classes are registered per mode with add_mode, handlers are
    accumulated with add_element (chainable), and build instantiates the
    executor matching the requested mode.
    """
    def __init__(self):
        self._elements = {}
        self._executor_per_mode = {}

    def add_mode(self, mode, executor):
        """Add a mode to build the following executor.

        :param mode: ChainModes value (or its str form)
        :param executor: Executor subclass (the class itself, not an instance)
        :raises DiapOTBException: on unknown mode or wrong executor type
        """
        # Check arguments
        if not isinstance(mode, ChainModes) and mode not in ChainModes.list():
            raise DiapOTBException("Given mode is unknown")

        if not issubclass(executor, Executor):
            raise DiapOTBException("Given executor is not an Executor")

        # Add the executor class to our dictionary
        self._executor_per_mode[str(mode)] = executor

    def add_element(self, key, elt):
        """Add element to build the executor.

        :return: self, to allow call chaining
        :raises DiapOTBException: when elt is not a Param/Filenames handler
        """
        # Check elt type
        if not isinstance(elt, (ParamHandler, FilenamesHandler)):
            raise DiapOTBException("element to build executor must be param or filename handler")

        # TODO, check key and persist available key names into an Enum
        self._elements[key] = elt

        return self

    def build(self, mode):
        """Return an instance of executor for the given mode.

        Single-image executors get (param, filename) handlers; dual-images
        executors additionally get the secondary filename handler.

        :raises DiapOTBException: when required elements are missing
        """
        # Sort the elements according to the key
        try:
            param_handler = self._elements["param"]
            filename_handler_reference = None
            filename_handler_secondary = None
            if "filename_reference" in self._elements and \
               "filename_secondary" in self._elements:
                filename_handler_reference = self._elements["filename_reference"]
                filename_handler_secondary = self._elements["filename_secondary"]
            else:
                filename_handler_reference = self._elements["filename"]
        except KeyError as err:
            # Chain the original KeyError for easier debugging
            raise DiapOTBException("Missing elements to build the executor") from err

        arguments = [param_handler, filename_handler_reference]
        if filename_handler_secondary is not None:
            arguments.append(filename_handler_secondary)

        # Build the instance following the mode
        return self._executor_per_mode[str(mode)](*arguments)
diff --git a/python_src/lib/processing/core/Utils.py b/python_src/lib/processing/core/Utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed5aeb798d9d5e7c4e4eb0937d434ba1793ec439
--- /dev/null
+++ b/python_src/lib/processing/core/Utils.py
@@ -0,0 +1,877 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
+#
+# This file is part of Orfeo Toolbox
+#
+#     https://www.orfeo-toolbox.org/
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+    func_utils module
+    ==================
+
+    Pool of functions for logger, checks, image operations ...
+
+"""
+
+
+# Imports
+import logging
+import os
+import sys
+import argparse
+import re
+import datetime
+import time
+import xml.etree.ElementTree as ET
+import h5py
+
+from .DiapOTBExceptions import DiapOTBException
+from .DiapOTBEnums import Satellite
+
+import otbApplication as otb
+
+try:
+    import gdal
+    import osr
+    import ogr
+except ImportError:
+    import osgeo.gdal as gdal
+    import osgeo.osr as osr
+    import osgeo.ogr as ogr
+
+
+from .DiapOTBEnums import Sensor, ScriptNames
+
+# Streamer to our log file
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level

    def write(self, buf):
        """Forward each line of buf to the logger at the configured level.
        """
        stripped_lines = buf.rstrip().splitlines()
        for current_line in stripped_lines:
            self.logger.log(self.log_level, current_line.rstrip())

    def flush(self):
        """Flush every handler attached to the underlying logger.
        """
        for current_handler in self.logger.handlers:
            current_handler.flush()
+
+# Global variables for logger and std_out
+logger = logging.getLogger(__name__)
+LOG_FORMATTER = logging.Formatter('%(filename)s :: %(levelname)s :: %(message)s')
+STEARMER = StreamToLogger(logger, logging.INFO)
+STDOUT_SAVE = STEARMER
+
+### Functions for logger ###
def init_logger():
    """
        Init logger with a stream handler.

        The module-level logger is set to INFO while the console handler
        only lets WARNING and above through.
    """
    logger.setLevel(logging.INFO)

    # Console handler restricted to warnings and errors
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.WARNING)
    logger.addHandler(console_handler)
+
+
def init_filelog(output_dir):
    """
        Init logger with a file handler (info.log).
        The standard output is redirected into this file handler. The std was saved into STDOUT_SAVE.

        After this call, print() goes to info.log (via STEARMER) while
        print_on_std() still reaches the real console through STDOUT_SAVE.

        :param output_dir: directory receiving the info.log file
    """
    # File handler for the logger
    # Create file handler which logs even info messages (used as stdout redirection)
    file_handler = logging.FileHandler(os.path.join(output_dir, 'info.log'), 'a')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(LOG_FORMATTER)

    # Add Handlers
    logger.addHandler(file_handler)

    # Redirect stdout and stderr to logger
    # (order matters: the original write/flush must be captured BEFORE
    # sys.stdout is patched)
    stdout_save_write = sys.stdout.write # Save stdout.write to print some info into the console
    stdout_save_flush = sys.stdout.flush # Save stdout.flush to print some info into the console
    sys.stdout.write = STEARMER.write # Replace stdout.write by our StreamToLogger
    sys.stdout.flush = STEARMER.flush # Replace stdout.flush by our StreamToLogger
    #STDOUT_SAVE = STEARMER # Different object
    STDOUT_SAVE.write = stdout_save_write # Restore stdout.write into STDOUT_SAVE
    STDOUT_SAVE.flush = stdout_save_flush # Restore stdout.flush into STDOUT_SAVE
+
+
def log(level, msg):
    """
        Transfer the msg to our logger with the required level.

        :param level: logging level (e.g. logging.INFO)
        :param msg: message to log
    """
    logger.log(level, msg)
+
+
def print_on_std(msg):
    """
        Transfer the msg to our stdout => real print on standard output.

        Useful once init_filelog has redirected sys.stdout to the logger:
        STDOUT_SAVE keeps the original stdout write/flush.
    """
    # Print on STDOUT_SAVE aka the original/true stdout
    print(msg, file=STDOUT_SAVE)
+
+
+### Functions for image format ###
def check_image_format(image):
    """Check that the image extension is supported by DiapOTB.

    Only tif/tiff, h5 and cosar extensions are accepted.

    :param image: path to the input image
    :raises DiapOTBException: when the extension is not supported
    """
    image_name = os.path.basename(image)
    ext = image_name.split(".")[-1]

    # Check extension
    available_ext = ["tif", "tiff", "h5", "cosar"]
    if ext not in available_ext:
        raise DiapOTBException("Image extension is not available in DiapOTB. Available extension are : " +
                               str(available_ext))
+
def adapt_image_format(image):
    """Check format : only tiff, h5 and cosar are available.
    Return the path to the SLC dataset for the h5 format, the unchanged
    path otherwise.

    :param image: path to the input image
    :raises DiapOTBException: when an h5 file does not hold the //S01/SBI dataset
    """
    output_image = image
    image_name = os.path.basename(image)
    ext = image_name.split(".")[-1]

    # Adapt image if h5 format to retrieve the dedicated dataset
    if ext == "h5":
        image_h5 = h5py.File(image, 'r')
        try:
            ldataset = list(image_h5.keys())

            # "or" : reject unless there is exactly one dataset named "S01"
            # (with "and", a single wrongly-named dataset slipped through and
            # crashed below with a raw KeyError)
            if len(ldataset) != 1 or ldataset[0] != "S01":
                raise DiapOTBException("Error, H5 input files does not contain the expected dataset")

            slc_dataset = dict(image_h5['S01'])

            if 'SBI' not in slc_dataset:
                raise DiapOTBException("Error, H5 input files does not contain the expected dataset")
        finally:
            # Always release the h5 file handle
            image_h5.close()

        # Change the name of image to read directly the //S01/SBI
        output_image = "HDF5:" + output_image + "://S01/SBI"

    return output_image
+
+### Functions for metadata ###
def get_image_kwl(image):
    """
        Retrieve keyword list from an image thanks to ReadImageInfo.

        The OTB keyword output is split on newlines, then each "key : value"
        entry is stored in a dict with all whitespace stripped from the value.

        :param image: path to the input image (h5 paths are adapted first)
        :return: dict of metadata keyword -> value
    """
    # adapt image, if needed (for h5)
    image = adapt_image_format(image)

    # Retrieve some information about our input images with ReadImageInfo application
    app_read_image_info = otb.Registry.CreateApplication("ReadImageInfo")
    app_read_image_info.SetParameterString("in", image)
    app_read_image_info.SetParameterString("keywordlist", "true")
    app_read_image_info.Execute()

    keywordlist = app_read_image_info.GetParameterString("keyword").split("\n")
    keywordlist = filter(None, keywordlist)  # drop empty lines
    dict_kwl = {i.split(':')[0] : re.sub(r"[\n\t\s]*", "", "".join(i.split(':')[1:]))
                for i in keywordlist}

    return dict_kwl
+
def get_sensor_from_kwl(dict_kwl):
    """Get sensor Id and check if sensor is available for DiapOTB.

    :param dict_kwl: metadata dict as returned by get_image_kwl
    :return: the sensor id, or the int 1 when dict_kwl is not a dict
        (NOTE(review): legacy error-code return, kept for compatibility)
    :raises DiapOTBException: when the sensor is missing or unsupported
    """
    if not isinstance(dict_kwl, dict):
        logger.warning("input dict_kwl is not a dict-like")
        return 1

    try:
        sensor = dict_kwl["sensor"]
    except KeyError as err:
        # Chain the original KeyError for easier debugging
        raise DiapOTBException("Unknown sensor. Only CSK, S1 or TSX/PAZ are available") from err

    print(sensor)

    if sensor not in Satellite.list():
        raise DiapOTBException("Unknown sensor. Only CSK, S1 or TSX/PAZ are available")

    return sensor
+
def get_dem_information(dem):
    """
        Retrieve DEM information thanks to ReadImageInfo.

        :param dem: path to the DEM file
        :return: dict with spacing and estimated ground spacing (x and y)
    """
    # Get information about DEM (spacing, size ..)
    app_read_dem_info = otb.Registry.CreateApplication("ReadImageInfo")
    app_read_dem_info.SetParameterString("in", dem)
    app_read_dem_info.SetParameterString("keywordlist", "true")
    app_read_dem_info.Execute()

    # Map each DiapOTB key to the corresponding ReadImageInfo output
    keys_to_params = {'spacingXDEM': "spacingx",
                      'estimatedGroundSpacingXDEM': "estimatedgroundspacingx",
                      'spacingYDEM': "spacingy",
                      'estimatedGroundSpacingYDEM': "estimatedgroundspacingy"}

    return {key: app_read_dem_info.GetParameterFloat(param)
            for key, param in keys_to_params.items()}
+
+
+### Functions for image/file selection
def get_img_from_safe(arg, search_dir="."):
    """
        Retrieve selected image from a SAFE directory.

        :param arg: exact filename to look for
        :param search_dir: root directory to walk
        :return: full path of the first match, or None when not found
    """
    for root, _, files in os.walk(search_dir):
        if arg in files:
            return os.path.join(root, arg)
    return None
+
def get_img_from_dir(arg, search_dir="."):
    """
        Retrieve selected image from a directory (for Cosmo sensor).

        :param arg: exact filename to look for
        :param search_dir: root directory to walk
        :return: full path of the first match, or None when not found
    """
    for root, _, files in os.walk(search_dir):
        if arg in files:
            return os.path.join(root, arg)
    return None
+
def check_image_pattern(img, mode=str(Sensor.S1SM)):
    """
        Check pattern for current image. Must be coherent according to the sensor and mode.

        :param img: image filename to check
        :param mode: sensor mode (str form of a Sensor member)
        :return: True when the filename matches the expected naming convention
    """

    correct_pattern = False

    if mode == str(Sensor.S1SM):
        # Mode S1 SM : mmm-bb-ttt-pp-yyyymmddthhmmss-yyyymmddthhmmss*
        # mmm : Mission identifier (s1a or s1b)
        # bb : Mode/Beam (s1-s6 for SM)
        # ttt : Product type (always slc here)
        # pp : Polarisations (2 letters : hh or vv or vh or hv)
        # yyyymmddthhmmss : Product start/stop date and times
        # (14 digits representing the date and time separated by the character "t")
        # * : Others representations such as orbits number or images number ...
        pattern = "".join(["s1.", "-", '\\w{1}', '\\d', "-slc-", '\\w{2}', "-",
                           '\\d{8}', "t", '\\d{6}', "-", '\\d{8}', "t", '\\d{6}'])

        if re.match(pattern, img):
            correct_pattern = True

    elif mode == str(Sensor.CSK):
        # Mode Cosmo : CSKS<i>_*_<YYYYMMDDhhmmss>_<YYYYMMDDhhmmss>
        # i : Identifier of the satellite (1, 2, 3 or 4)
        # YYYYMMDDhhmmss : Product start/stop date and times
        # * : Others representations such as identifier for orbit direction or look side
        pattern = "".join(["CSKS", '\\d'])

        if re.match(pattern, img):
            pattern_dates = "".join(['\\d{14}', "_", '\\d{14}'])
            dates = re.findall(pattern_dates, img)

            if len(dates) == 1:
                correct_pattern = True

    # Sensor.S1IW : this branch was unreachable before (it repeated the
    # S1SM condition) although it documents and matches the IW convention
    elif mode == str(Sensor.S1IW):
        # Mode S1 IW : mmm-bb-ttt-pp-yyyymmddthhmmss-yyyymmddthhmmss*
        # mmm : Mission identifier (s1A or s1B)
        # bb : Mode/Beam (iw1-iw3 for IW)
        # ttt : Product type (always slc here)
        # pp : Polarisations (2 letters : hh or vv or vh or hv)
        # yyyymmddthhmmss : Product start/stop date and times
        # (14 digits representing the date and time separated by the character "t")
        # * : Others representations such as orbits number or images number ...
        pattern = "".join(["s1.", "-", '\\w{2}', '\\d', "-slc-", '\\w{2}', "-",
                           '\\d{8}', "t", '\\d{6}', "-", '\\d{8}', "t", '\\d{6}'])

        if re.match(pattern, img):
            correct_pattern = True

    else:
        logger.exception("Unknown sensor in check_image_pattern")

    return correct_pattern
+
+
def get_slcml_namming_from_productname(product_name, mode=str(Sensor.S1SM)):
    """
        Get correct names (with conventions) for SLC and ML outputs (without extension).

        The returned name is fullsensor_subwath_product_pol_UTCfirstdate with
        fullsensor : S1A/B for S1 or CSK for Cosmo
        subwath : for instance, s4 for S1SM and Cosmo or iw1 for S1 IW
        product : SCS_U/B for Cosmo or SLC for S1
        UTCdate : date with YYYYMMDDthhmmss format

        :param product_name: input product file name
        :param mode: sensor/mode as a string (S1SM, Cosmo or S1IW)
        :return: lower-case output name (no extension, no ML factors)
    """
    slc_ml_name = ""

    if mode == str(Sensor.CSK):
        # Mode Cosmo : CSKS<i>_SCS_U/B_Sk_*_pol_<YYYYMMDDhhmmss>_<YYYYMMDDhhmmss>
        # i : Identifier of the satellite (1, 2, 3 or 4)
        # Sk : Mode/Beam (s1-s6)
        # pol : Polarisations (HH or VV or VH or HV)
        # YYYYMMDDhhmmss : Product start/stop date and times
        # * : Others representations such as identifier for orbit direction or look side
        productname_list = product_name.split("_")

        slc_ml_name = productname_list[0][:3] + "_" + productname_list[3] + "_" + \
                    productname_list[1] + productname_list[2] + "_" + productname_list[5] + \
                    "_" + productname_list[8][:8] + "t" + productname_list[8][8:]

    elif mode in [str(Sensor.S1SM), str(Sensor.S1IW)]:
        # Mode S1 IW : mmm-bb-ttt-pp-yyyymmddthhmmss-yyyymmddthhmmss*
        # mmm : Mission identifier (s1A or s1B)
        # bb : Mode/Beam (iw1-iw3 for IW or s1-s6)
        # ttt : Product type (always slc here)
        # pp : Polarisations (2 letters : hh or vv or vh or hv)
        # yyyymmddthhmmss : Product start/stop date and times
        # (14 digits representing the date and time separated by the character "t")
        # * : Others representations such as orbits number or images number ...
        productname_list = product_name.split("-")

        slc_ml_name = productname_list[0] + "_" + productname_list[1] + "_" + \
                    productname_list[2] + "_" + productname_list[3] + "_" + \
                    productname_list[4]

    else:
        # Bug fix: the log message used to reference check_image_pattern
        logger.exception("Unknown sensor in get_slcml_namming_from_productname")

    # Return output name without extension or ml factors
    return slc_ml_name.lower()
+
+
+
def get_interfnamming_from_productname(product_master_name, product_slave_name, mode=str(Sensor.S1SM)):
    """
        Get correct names (with conventions) for interferogram outputs (without extension).

        The returned name is sensor_M_UTCfirstdatemaster_S_UTCfirstdateslave with
        sensor : S1 for S1 or CSK for Cosmo
        UTCdate : date with YYYYMMDDthhmmss format

        :param product_master_name: master product file name
        :param product_slave_name: slave product file name
        :param mode: sensor/mode as a string (S1SM, Cosmo or S1IW)
        :return: interferogram output name (no extension, no ML factors)
    """
    interf_name = ""

    if mode == str(Sensor.CSK):
        # Mode Cosmo : CSKS<i>_SCS_U/B_Sk_*_pol_<YYYYMMDDhhmmss>_<YYYYMMDDhhmmss>
        # i : Identifier of the satellite (1, 2, 3 or 4)
        # Sk : Mode/Beam (s1-s6)
        # pol : Polarisations (HH or VV or VH or HV)
        # YYYYMMDDhhmmss : Product start/stop date and times
        # * : Others representations such as identifier for orbit direction or look side
        product_m_list = product_master_name.split("_")
        product_s_list = product_slave_name.split("_")

        interf_name = product_m_list[0][:3] + "_M_" + \
                      product_m_list[8][:8] + "t" + product_m_list[8][8:] + "_S_" + \
                      product_s_list[8][:8] + "t" + product_s_list[8][8:]

    elif mode in [str(Sensor.S1SM), str(Sensor.S1IW)]:
        # Mode S1 IW : mmm-bb-ttt-pp-yyyymmddthhmmss-yyyymmddthhmmss*
        # mmm : Mission identifier (s1A or s1B)
        # bb : Mode/Beam (iw1-iw3 or s1-s6 for IW)
        # ttt : Product type (always slc here)
        # pp : Polarisations (2 letters : hh or vv or vh or hv)
        # yyyymmddthhmmss : Product start/stop date and times
        # (14 digits representing the date and time separated by the character "t")
        # * : Others representations such as orbits number or images number ...
        product_m_list = product_master_name.split("-")
        product_s_list = product_slave_name.split("-")

        interf_name = product_m_list[0][:2].upper() + "_M_" + \
                      product_m_list[4] + "_S_" + \
                      product_s_list[4]

    else:
        # Bug fix: the log message used to reference check_image_pattern
        logger.exception("Unknown sensor in get_interfnamming_from_productname")

    # Return output name without extension or ml factors
    return interf_name
+
+
+# EOF function for fine orbits
def metadata_correction_with_fine_orbits(image, eof_path_file, output_dir):
    """Regenerate the geom file of an image with precise orbits.

    Runs the SARMetadataCorrection OTB application in "orbits" mode and
    writes <image>_extended.geom into output_dir.

    :param image: input SAR image
    :param eof_path_file: precise-orbit (EOF) file to inject
    :param output_dir: directory receiving the new geom file
    :return: path of the generated geom file
    """
    # Output geom name is derived from the image base name
    base_name = os.path.splitext(os.path.basename(image))[0]
    out_path = os.path.join(output_dir, base_name + "_extended" + ".geom")

    correction_app = otb.Registry.CreateApplication("SARMetadataCorrection")
    correction_app.SetParameterString("mode", "orbits")
    correction_app.SetParameterString("insar", image)
    correction_app.SetParameterString("infineorbits", eof_path_file)
    correction_app.SetParameterString("outkwl", out_path)
    correction_app.ExecuteAndWriteOutput()

    return out_path
+
def apply_eof_path_on_orbit(sensor, eof_path, image, image_kwl, output_dir):
    """Replace the orbit vectors by a precise-orbit EOF file when available.

    Only applies to S1 sensors (S1A/S1B) and when eof_path is provided.
    When a matching EOF file is found, a "fine" geom file is generated and
    referenced through an extended filename (?geom=...).

    :param sensor: sensor name as a string
    :param eof_path: directory containing candidate EOF files (may be empty)
    :param image: input image path
    :param image_kwl: keyword list (dict) of the image metadata
    :param output_dir: processing output directory
    :return: image base name, possibly suffixed with "?geom=<fine geom path>"
    """
    new_image_name = os.path.basename(image)

    # Guard: precise orbits only make sense for S1 with a non-empty EOF path
    if sensor not in [str(Satellite.S1A), str(Satellite.S1B)] or not eof_path:
        return new_image_name

    # Candidate EOF files available in eof_path
    eof_candidates = get_all_files_with_ext(eof_path, ".EOF")

    # Acquisition window and platform (S1A or S1B) from the keyword list
    first_time = image_kwl['support_data.first_line_time']
    last_time = image_kwl['support_data.last_line_time']
    platform = image_kwl['manifest_data.instrument']

    # Pick the EOF file covering the acquisition window (None if no match)
    matching_eof = select_eof_with_date(first_time, last_time, eof_candidates, platform)
    if not matching_eof:
        return new_image_name

    # Dedicated repository for the regenerated geom files
    extended_geom_dir = os.path.join(output_dir, "extended_geom")
    if not os.path.exists(extended_geom_dir):
        os.makedirs(extended_geom_dir)

    # Call SARMetadataCorrection to produce the "fine" geom
    fine_geom_path = metadata_correction_with_fine_orbits(image,
                                                          os.path.join(eof_path, matching_eof),
                                                          extended_geom_dir)

    # Attach the new geom file with an extended filename
    return new_image_name + "?geom=" + fine_geom_path
+
+
+# utils function to define geometry
def image_envelope(in_tif, out_shp):
    """
        Generate the footprint shapefile of an image with the OTB
        ImageEnvelope application and return its path.
    """
    envelope_app = otb.Registry.CreateApplication("ImageEnvelope")
    envelope_app.SetParameterString("in", in_tif)
    envelope_app.SetParameterString("out", out_shp)
    envelope_app.ExecuteAndWriteOutput()
    return out_shp
+
def get_master_geometry(in_shp):
    """
        This method returns the geometry of an input georeferenced
        shapefile (first feature of the layer, or None when empty).
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    mstr_ds = driver.Open(in_shp, 0)
    mstr_layer = mstr_ds.GetLayer()
    for master in mstr_layer:
        # Clone so the geometry outlives the dataset/layer iterator.
        # (The original called GetGeometryRef().Clone() twice, leaking a
        # dead clone before returning the second one.)
        return master.GetGeometryRef().Clone()
    return None
+
def check_srtm_coverage(in_shp_geo, srtm):
    """
        This method checks and returns the SRTM tiles intersected.

        :param in_shp_geo: input geometry (ogr Geometry)
        :param srtm: path to the SRTM tiles index shapefile
        :return: list of the 'FILE' field values of intersecting tiles
    """
    driver = ogr.GetDriverByName("ESRI Shapefile")
    srtm_ds = driver.Open(srtm, 0)
    srtm_layer = srtm_ds.GetLayer()
    # (The original initialized a dict then rebound it to this list;
    # a single list is all that is needed.)
    srtm_tiles = []
    for srtm_tile in srtm_layer:
        srtm_footprint = srtm_tile.GetGeometryRef()
        intersection = in_shp_geo.Intersection(srtm_footprint)
        # Keep the tile only when it actually overlaps the footprint
        if intersection.GetArea() > 0:
            srtm_tiles.append(srtm_tile.GetField('FILE'))
    return srtm_tiles
+
+
def add_wgs_projection(in_tiff):
    """
        Add a projection reference (WGS 84) to the input tiff when it has none.
    """
    # Open input tiff in update mode
    dataset = gdal.Open(in_tiff, gdal.GA_Update)

    wkt = dataset.GetProjection()

    # No projection defined => default projection (EPSG 4326 = WGS84)
    if not wkt:
        spatial_ref = osr.SpatialReference()
        spatial_ref.ImportFromEPSG(4326)
        wkt = spatial_ref.ExportToWkt()

    # Write the projection, then drop the dataset reference to apply changes
    dataset.SetProjection(wkt)
    dataset = None
+
+
def get_all_tiff(pol, iw="", ext="", search_dir="."):
    """
        Get all tiff (or h5) images from an input directory (check on pattern).

        Images are selected by extension, naming pattern, polarisation and,
        for S1 IW, the requested subswath.

        :param pol: polarisation to select (e.g. "vv", "HH")
        :param iw: subswath (non-empty => S1 IW mode)
        :param ext: "h5" selects the Cosmo branch, anything else S1 SM
        :param search_dir: directory walked recursively
        :return: (selected file names, warning flag raised when at least one
                  file had an unexpected naming pattern)
    """
    # (Cleanup: the original iterated `for i in (i for i in files)` — a
    # no-op generator — and used a redundant `if ext != "h5"` after
    # `if ext == "h5"`.)
    tiff_list = []
    throw_warning = False

    if iw != "":
        # Mode S1 IW : .tiff selected on polarisation and subswath
        for _, _, files in os.walk(search_dir):
            for i in files:
                if not i.endswith(".tiff"):
                    continue
                if check_image_pattern(i, "S1IW"):
                    if pol == i.split("-")[3] and iw == i.split("-")[1]:
                        tiff_list.append(i)
                else:
                    throw_warning = True

    elif ext == "h5":
        # Mode Cosmo : .h5 selected on polarisation
        for _, _, files in os.walk(search_dir):
            for i in files:
                if not i.endswith(".h5"):
                    continue
                if check_image_pattern(i, "Cosmo"):
                    if pol == i.split("_")[5]:
                        tiff_list.append(i)
                else:
                    throw_warning = True

    else:
        # Mode S1 SM : .tiff selected on polarisation
        for _, _, files in os.walk(search_dir):
            for i in files:
                if not i.endswith(".tiff"):
                    continue
                if check_image_pattern(i, str(Sensor.S1SM)):
                    if pol == i.split("-")[3]:
                        tiff_list.append(i)
                else:
                    throw_warning = True

    return tiff_list, throw_warning
+
def get_all_files_with_ext(search_dir, ext):
    """
        Get all files into a search directory with a given extension.

        :param search_dir: directory walked recursively
        :param ext: required suffix (e.g. ".EOF")
        :return: list of matching file names (base names only, no path)
    """
    # (Cleanup: the original iterated `for i in (i for i in files)`,
    # a no-op generator wrapper around the files list.)
    list_files = []
    for _, _, files in os.walk(search_dir):
        list_files.extend(name for name in files if name.endswith(ext))
    return list_files
+
+
def get_date(in_tif, ext=""):
    """
        Extract the acquisition date (YYYYMMDD) from an image file name.

        :param in_tif: image file name following the sensor convention
        :param ext: "h5" for Cosmo names ('_' separated), else S1 ('-' separated)
        :return: date string
    """
    if ext == "h5":
        # Cosmo: 9th '_' field starts with YYYYMMDD
        return in_tif.split("_")[8][:8]
    # S1: 5th '-' field is yyyymmddthhmmss; keep the part before 't'
    return in_tif.split("-")[4].split("t")[0]
+
+
def get_tiff_with_dates(start, end, exclude, tiff_list, ext=""):
    """
        Keep, from tiff_list, the images whose acquisition date lies within
        [start, end] and is not listed in exclude.

        :param start: first date as an int (YYYYMMDD)
        :param end: last date as an int (YYYYMMDD)
        :param exclude: dates (strings) to reject
        :param tiff_list: candidate image file names
        :param ext: "h5" for Cosmo names, else S1 (forwarded to get_date)
        :return: selected image file names
    """
    excluded_dates = list(exclude)
    selected = []
    for tif in tiff_list:
        tif_date = get_date(tif, ext)
        # Keep only dates inside the requested window and not excluded
        if start <= int(tif_date) <= end and tif_date not in excluded_dates:
            selected.append(tif)
    return selected
+
def select_eof_with_date(start, end, eof_list, sat_number="S1A"):
    """
        Select, from the input list, the EOF file whose validity interval
        covers the image acquisition interval [start, end].

        :param start: image start time ("%Y-%m-%dT%H%M%S.%f")
        :param end: image end time (same format)
        :param eof_list: candidate EOF file names
        :param sat_number: platform identifier ("S1A" or "S1B")
        :return: matching EOF file name, or None when no file matches
    """
    image_start = time.mktime(time.strptime(start, "%Y-%m-%dT%H%M%S.%f"))
    image_end = time.mktime(time.strptime(end, "%Y-%m-%dT%H%M%S.%f"))

    for eof_file in eof_list:
        # Platform identifier is the first '_' separated field
        if eof_file.split('_')[0] != sat_number:
            continue

        # Strip the extension before reading the validity fields
        stem = eof_file.split(".EOF")[0]
        validity_start = stem.split('_')[-2].split("V")[1]
        validity_end = stem.split('_')[-1]

        eof_start = time.mktime(time.strptime(validity_start, "%Y%m%dT%H%M%S"))
        eof_end = time.mktime(time.strptime(validity_end, "%Y%m%dT%H%M%S"))

        # Keep this EOF file if its validity interval contains the image dates
        if image_start >= eof_start and image_end <= eof_end:
            return stem + ".EOF"

    # None of the files covers the wanted dates
    return None
+
+
def get_relative_orbit(manifest):
    """
        Read the relative orbit number from a S1 SAFE manifest file.
    """
    # Sentinel-1 SAFE XML namespace used by the orbit elements
    safe_ns = "{http://www.esa.int/safe/sentinel-1.0}"
    xpath = ("metadataSection/metadataObject/metadataWrap/xmlData/"
             + safe_ns + "orbitReference/" + safe_ns + "relativeOrbitNumber")
    tree = ET.parse(manifest)
    return int(tree.find(xpath).text)
+
def build_virutal_raster(master_image, start_time, end_time, master_date,
                         srtm_shapefile, hgts_path, output_dir="."):
    """
        Build a vrt file corresponding to a dem from hgt (SRTM) files.

        The hgt files intersecting the master image footprint are gathered
        from hgts_path and merged into a single virtual raster.
    """
    # Vector envelope of the master image
    target_dir = os.path.dirname(master_image)
    envelope_shp = image_envelope(master_image, target_dir + "/master_envelope.shp")

    # Master footprint geometry
    master_footprint = get_master_geometry(envelope_shp)

    # SRTM tiles intersected by the footprint, resolved to full paths
    hgts_tuple = [os.path.join(hgts_path, hgt)
                  for hgt in check_srtm_coverage(master_footprint, srtm_shapefile)]

    print("\n Creating virtual raster from intersected hgt files...\n")
    dem = "{}/output_{}_to_{}_m_{}/dem_scene.vrt".format(output_dir, start_time,
                                                         end_time, master_date)
    gdal.BuildVRT(dem, hgts_tuple)
    return dem, target_dir
+
def arg_dates_to_iso_dates(start_date, end_date):
    """
        Convert "YYYYMMDD" start/end strings into datetime.date objects.

        :param start_date: start date string; quits the program when missing
        :param end_date: end date string
        :return: (iso_start, iso_end) as datetime.date objects
    """
    if start_date is not None:
        iso_start = datetime.datetime.strptime(start_date,
                                               "%Y%m%d").date()
        iso_end = datetime.datetime.strptime(end_date, "%Y%m%d").date()
        print("\n\n Selected dates: \n From {}, to {} \n".format(iso_start,
                                                                 iso_end))
    if not start_date:
        # Bug fix: user-facing message typo ("needeed")
        print("Start time is needed.")
        quit()
    return iso_start, iso_end
+
+### Image operations ###
def extract_roi(inp, out, roi):
    """
        Extract a region of interest (projWin window) from an image.
    """
    src = gdal.Open(inp)
    src = gdal.Translate(out, src, projWin=roi)
    src = None  # drop the reference to close and flush the dataset
+
+
def extract_band123(inp, out):
    """
        Extract the first three bands of a vector image into a tiled
        float32 GTiff and return the output path.
    """
    src = gdal.Open(inp, gdal.GA_ReadOnly)
    src = gdal.Translate(out, src, bandList=["1", "2", "3"], format="GTiff",
                         outputType=gdal.GDT_Float32, creationOptions=['TILED=YES'])
    src = None  # drop the reference to close and flush the dataset
    return out
+
+
def silentremove(directory, filename):
    """Remove a file, ignoring the case where it does not exist.

    :param directory: directory containing the file
    :param filename: file name to remove
    :raises OSError: re-raised for any failure other than a missing file
    """
    # (The original also caught NameError and read e.errno on it, but
    # NameError has no errno attribute — that handler was broken/dead.)
    try:
        os.remove(os.path.join(directory, filename))
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
+
+### Functions for some checks ###
def select_burst(dict_master, dict_slave, first_burst, last_burst):
    """
        Select, for each master burst in [first_burst, last_burst], the
        corresponding slave burst.

        The pairing uses the azimuth_anx_time metadata: the closest slave
        burst is retained, and the pair is kept only when the time
        difference is below 1.

        :param dict_master: master image keyword list (dict)
        :param dict_slave: slave image keyword list (dict)
        :param first_burst: first master burst index (inclusive)
        :param last_burst: last master burst index (inclusive)
        :return: (valid master burst ids, matching slave burst ids)
    """
    key_prefix = "support_data.geom.bursts.burst["
    nb_burst_slave = int(dict_slave['support_data.geom.bursts.number'])

    valid_burst_master = []
    valid_burst_slave = []

    for master_id in range(first_burst, last_burst + 1):
        # anx time of the current master burst
        anx_master = float(dict_master[key_prefix + str(master_id) + "].azimuth_anx_time"])

        # Scan all slave bursts for the closest anx time
        # (200 acts as an "infinite" initial difference)
        best_diff = 200
        best_slave_id = master_id
        for slave_id in range(nb_burst_slave):
            anx_slave = float(dict_slave[key_prefix + str(slave_id) + "].azimuth_anx_time"])
            current_diff = abs(anx_master - anx_slave)
            if current_diff < best_diff:
                best_diff = current_diff
                best_slave_id = slave_id

        # Keep the pair only when the anx times are close enough (< 1)
        if best_diff < 1.:
            valid_burst_master.append(master_id)
            valid_burst_slave.append(best_slave_id)

    return valid_burst_master, valid_burst_slave
+
def check_roi_format(roi):
    """
        Validate the roi string format: 'ulx uly lrx lry'
        (ex: -roi 2.44115 48.96126 2.44176 48.95927).
        Quits the program on a malformed roi.
    """
    # Four space-separated, optionally signed, decimal numbers
    roi_regex = re.compile("^([+-]?([0-9]+([.][0-9]*)?|[.][0-9]+)\\s){4}$")
    # The pattern expects a trailing separator after each number
    candidate = roi if roi.endswith(' ') else roi + ' '

    if not roi_regex.match(candidate):
        print("Wrong format for roi paramater, must be 'ulx uly lrx lry' (ex: -roi 2.44115 48.96126 2.44176 48.95927)")
        quit()
+
def str2bool(v):
    """
        Interpret a string as a boolean; "yes", "true", "t" and "1"
        (case-insensitive) are True, anything else False.
    """
    return v.lower() in {"yes", "true", "t", "1"}
+
+
def avoid_duplicates(inlist):
    """
        Return a copy of inlist without duplicates, keeping the order of
        first occurrence.
    """
    seen = set()
    deduplicated = []
    for item in inlist:
        if item not in seen:
            seen.add(item)
            deduplicated.append(item)
    return deduplicated
+
def check_if_exist(file_or_path_or_img):
    """
        Return True when the given file, path or image exists on disk,
        False otherwise.
    """
    return os.path.exists(file_or_path_or_img)
+
+
def check_if_dir(in_dir):
    """
        Return True when the input path exists and is a directory,
        False otherwise.
    """
    return os.path.isdir(in_dir)
+
def check_burst_index(burst_index):
    """
        Parse and validate a burst index string such as "0-5".
        Quits the program on a malformed index.

        :param burst_index: "first-last" burst range as a string
        :return: (min burst, max burst, raw parsed list of the two values)
    """
    # Only digits and '-' are accepted
    index_is_valid = bool(re.match(r'^[\-0-9]+$', burst_index))
    first_burst = 0
    last_burst = 0
    burst_list = []

    if index_is_valid:
        burst_list = [int(part) for part in burst_index.split('-')]
        # Exactly two bounds are expected
        if len(burst_list) == 2:
            first_burst = min(burst_list)
            last_burst = max(burst_list)
        else:
            index_is_valid = False

    if not index_is_valid:
        print("Wrong Burst Index Format (for instance 0-5)")
        quit()

    return first_burst, last_burst, burst_list
diff --git a/python_src/lib/processing/core/__init__.py b/python_src/lib/processing/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python_src/SAR_MultiSlc.py b/python_src/old/SAR_MultiSlc.py
similarity index 100%
rename from python_src/SAR_MultiSlc.py
rename to python_src/old/SAR_MultiSlc.py
diff --git a/python_src/SAR_MultiSlc_IW.py b/python_src/old/SAR_MultiSlc_IW.py
similarity index 100%
rename from python_src/SAR_MultiSlc_IW.py
rename to python_src/old/SAR_MultiSlc_IW.py
diff --git a/python_src/coRegistation_S1IW.py b/python_src/old/coRegistation_S1IW.py
similarity index 100%
rename from python_src/coRegistation_S1IW.py
rename to python_src/old/coRegistation_S1IW.py
diff --git a/python_src/diapOTB.py b/python_src/old/diapOTB.py
similarity index 100%
rename from python_src/diapOTB.py
rename to python_src/old/diapOTB.py
diff --git a/python_src/diapOTB_S1IW.py b/python_src/old/diapOTB_S1IW.py
similarity index 100%
rename from python_src/diapOTB_S1IW.py
rename to python_src/old/diapOTB_S1IW.py
diff --git a/python_src/processings/DInSar.py b/python_src/old/processings/DInSar.py
similarity index 100%
rename from python_src/processings/DInSar.py
rename to python_src/old/processings/DInSar.py
diff --git a/python_src/processings/Ground.py b/python_src/old/processings/Ground.py
similarity index 100%
rename from python_src/processings/Ground.py
rename to python_src/old/processings/Ground.py
diff --git a/python_src/processings/Metadata_Correction.py b/python_src/old/processings/Metadata_Correction.py
similarity index 100%
rename from python_src/processings/Metadata_Correction.py
rename to python_src/old/processings/Metadata_Correction.py
diff --git a/python_src/processings/Post_Processing.py b/python_src/old/processings/Post_Processing.py
similarity index 100%
rename from python_src/processings/Post_Processing.py
rename to python_src/old/processings/Post_Processing.py
diff --git a/python_src/processings/Pre_Processing.py b/python_src/old/processings/Pre_Processing.py
similarity index 100%
rename from python_src/processings/Pre_Processing.py
rename to python_src/old/processings/Pre_Processing.py
diff --git a/python_src/old/processings/__init__.py b/python_src/old/processings/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python_src/old/share/ex_conda_env.yml b/python_src/old/share/ex_conda_env.yml
new file mode 100755
index 0000000000000000000000000000000000000000..ee4167ac89168675eb7767f41db893ddb51797d6
--- /dev/null
+++ b/python_src/old/share/ex_conda_env.yml
@@ -0,0 +1,110 @@
+name: diapOTB
+channels:
+  - conda-forge
+  - defaults
+dependencies:
+  - _libgcc_mutex=0.1=conda_forge
+  - _openmp_mutex=4.5=1_gnu
+  - boost-cpp=1.74.0=hc6e9bd1_3
+  - bzip2=1.0.8=h7f98852_4
+  - c-ares=1.17.1=h7f98852_1
+  - ca-certificates=2021.5.30=ha878542_0
+  - cached-property=1.5.2=hd8ed1ab_1
+  - cached_property=1.5.2=pyha770c72_1
+  - cairo=1.16.0=h6cf1ce9_1008
+  - certifi=2021.5.30=py39hf3d152e_0
+  - cfitsio=3.470=hb418390_7
+  - curl=7.77.0=hea6ffbf_0
+  - expat=2.4.1=h9c3ff4c_0
+  - fontconfig=2.13.1=hba837de_1005
+  - freetype=2.10.4=h0708190_1
+  - freexl=1.0.6=h7f98852_0
+  - gdal=3.2.2=py39h409cc32_3
+  - geos=3.9.1=h9c3ff4c_2
+  - geotiff=1.6.0=hcf90da6_5
+  - gettext=0.19.8.1=h0b5b191_1005
+  - giflib=5.2.1=h36c2ea0_2
+  - h5py=3.2.1=nompi_py39h98ba4bc_100
+  - hdf4=4.2.15=h10796ff_3
+  - hdf5=1.10.6=nompi_h6a2412b_1114
+  - icu=68.1=h58526e2_0
+  - jpeg=9d=h36c2ea0_0
+  - json-c=0.15=h98cffda_0
+  - kealib=1.4.14=hcc255d8_2
+  - krb5=1.19.1=hcc1bbae_0
+  - ld_impl_linux-64=2.33.1=h53a641e_7
+  - libblas=3.9.0=9_openblas
+  - libcblas=3.9.0=9_openblas
+  - libcurl=7.77.0=h2574ce0_0
+  - libdap4=3.20.6=hd7c4107_2
+  - libedit=3.1.20191231=he28a2e2_2
+  - libev=4.33=h516909a_1
+  - libffi=3.3=he6710b0_2
+  - libgcc-ng=9.3.0=h2828fa1_19
+  - libgdal=3.2.2=hbf32332_3
+  - libgfortran-ng=9.3.0=hff62375_19
+  - libgfortran5=9.3.0=hff62375_19
+  - libglib=2.68.2=h3e27bee_2
+  - libgomp=9.3.0=h2828fa1_19
+  - libiconv=1.16=h516909a_0
+  - libkml=1.3.0=h238a007_1013
+  - liblapack=3.9.0=9_openblas
+  - libnetcdf=4.8.0=nompi_hcd642e3_103
+  - libnghttp2=1.43.0=h812cca2_0
+  - libopenblas=0.3.15=pthreads_h8fe5266_1
+  - libpng=1.6.37=h21135ba_2
+  - libpq=13.3=hd57d9b9_0
+  - librttopo=1.1.0=h1185371_6
+  - libspatialite=5.0.1=h20cb978_4
+  - libssh2=1.9.0=ha56f1ee_6
+  - libstdcxx-ng=9.3.0=h6de172a_19
+  - libtiff=4.2.0=h85742a9_0
+  - libuuid=2.32.1=h7f98852_1000
+  - libwebp-base=1.2.0=h7f98852_2
+  - libxcb=1.13=h7f98852_1003
+  - libxml2=2.9.12=h72842e0_0
+  - libzip=1.7.3=h4de3113_0
+  - lz4-c=1.9.3=h9c3ff4c_0
+  - ncurses=6.2=he6710b0_1
+  - numpy=1.20.3=py39hdbf815f_1
+  - openjpeg=2.4.0=hb52868f_1
+  - openssl=1.1.1k=h7f98852_0
+  - pcre=8.44=he1b5a44_0
+  - pip=21.1.1=py39h06a4308_0
+  - pixman=0.40.0=h36c2ea0_0
+  - poppler=21.03.0=h93df280_0
+  - poppler-data=0.4.10=0
+  - postgresql=13.3=h2510834_0
+  - proj=8.0.0=h277dcde_0
+  - pthread-stubs=0.4=h36c2ea0_1001
+  - python=3.9.5=hdb3f193_3
+  - python_abi=3.9=1_cp39
+  - readline=8.1=h27cfd23_0
+  - setuptools=52.0.0=py39h06a4308_0
+  - sqlite=3.35.4=hdfb4753_0
+  - tiledb=2.2.9=h91fcb0e_0
+  - tk=8.6.10=hbc83047_0
+  - tzcode=2021a=h7f98852_1
+  - tzdata=2020f=h52ac0ba_0
+  - wheel=0.36.2=pyhd3eb1b0_0
+  - xerces-c=3.2.3=h9d8b166_2
+  - xorg-kbproto=1.0.7=h7f98852_1002
+  - xorg-libice=1.0.10=h7f98852_0
+  - xorg-libsm=1.2.3=hd9c2040_1000
+  - xorg-libx11=1.7.1=h7f98852_0
+  - xorg-libxau=1.0.9=h7f98852_0
+  - xorg-libxdmcp=1.1.3=h7f98852_0
+  - xorg-libxext=1.3.4=h7f98852_1
+  - xorg-libxrender=0.9.10=h7f98852_1003
+  - xorg-renderproto=0.11.1=h7f98852_1002
+  - xorg-xextproto=7.3.0=h7f98852_1002
+  - xorg-xproto=7.0.31=h7f98852_1007
+  - xz=5.2.5=h7b6447c_0
+  - zlib=1.2.11=h7b6447c_3
+  - zstd=1.4.9=ha95c52a_0
+  - pip:
+    - attrs==21.2.0
+    - jsonschema==3.2.0
+    - pyrsistent==0.17.3
+    - six==1.16.0
+prefix: /home/gaelle/anaconda3/envs/diapOTB
diff --git a/python_src/old/share/ex_config/ex_config_MultiSlc_CosmoS1SM.json b/python_src/old/share/ex_config/ex_config_MultiSlc_CosmoS1SM.json
new file mode 100644
index 0000000000000000000000000000000000000000..cf1c662ea8f7e3aa1aefae66a88b37b171ae31ac
--- /dev/null
+++ b/python_src/old/share/ex_config/ex_config_MultiSlc_CosmoS1SM.json
@@ -0,0 +1,72 @@
+{
+    "Global": {
+        "in":
+        {
+            "SRTM_Shapefile": "pathToSHP/srtm.shp",
+            "SRTM_Path": "pathToSRTM_30_hgt/",
+            "Geoid": "pathToGeoid/egm96.grd",
+            "Master_Image": "image_1.tiff",
+            "Start_Date": "20150809",
+            "End_Date": "20150902",
+            "Input_Path": "pathToInputDir"
+        },
+        "out":
+        {
+            "Output_Path": "pathToOutputDir"
+        },
+        "parameter":
+        {
+            "clean" : "true",
+	    "optram" : 256
+        }
+    },
+
+    "Pre_Processing": {
+        "out":
+        {
+            "doppler_file": "dop0.txt"
+        },
+        "parameter":
+        {
+            "ML_gain": 0.1,
+            "ML_ran": 3,
+            "ML_azi": 3
+        }
+    },
+    "Metadata_Correction":
+    {
+        "out":
+        {
+            "fine_metadata_file": "fine_metadata.txt"
+        },
+        "parameter":
+        {
+            "activate": false,
+            "GridStep_range": 150,
+            "GridStep_azimut": 150
+        }
+    },
+    "DIn_SAR":
+    {
+        "parameter":
+        {
+            "GridStep_range": 150,
+            "GridStep_azimut": 150,
+            "Grid_Threshold": 0.3,
+            "Grid_Gap": 1000,
+            "Interferogram_gain": 0.1,
+            "Activate_Interferogram": "yes"
+        }
+    },
+    "Post_Processing":
+    {
+        "parameter":
+        {
+            "Activate_Ortho": "yes",
+            "Spacingxy": 0.0001,
+	    "Activate_Filtering" : "yes",
+	    "Filtered_Interferogram_mlran" : 3,
+	    "Filtered_Interferogram_mlazi" : 3
+        }
+    }
+}
diff --git a/python_src/old/share/ex_config/ex_config_MultiSlc_IW.json b/python_src/old/share/ex_config/ex_config_MultiSlc_IW.json
new file mode 100644
index 0000000000000000000000000000000000000000..8079915ce90d4a85b77d09514e70b002cc7c02a3
--- /dev/null
+++ b/python_src/old/share/ex_config/ex_config_MultiSlc_IW.json
@@ -0,0 +1,64 @@
+{
+    "Global": {
+        "in":
+        {
+            "SRTM_Shapefile": "pathToSHP/srtm.shp",
+            "SRTM_Path": "pathToSRTM_30_hgt/",
+            "Geoid": "pathToGeoid/egm96.grd",
+            "Master_Image": "image_1.tiff",
+            "Start_Date": "20150809",
+            "End_Date": "20150902",
+            "Input_Path": "pathToInputDir"
+        },
+        "out":
+        {
+            "Output_Path": "pathToOutputDir"
+        },
+        "parameter":
+        {
+            "clean" : "true",
+            "burst_index": "0-8",
+            "optram" : 256,
+	    "tmpdir_into_outputdir": "yes"
+        }
+    },
+
+    "Pre_Processing": {
+        "out":
+        {
+            "doppler_file": "dop0.txt"
+        },
+        "parameter":
+        {
+	    "ML_ran": 8,
+ 	    "ML_azi": 2,
+            "ML_gain": 0.1
+        }
+    },
+    "Ground": {},
+    "DIn_SAR":
+    {
+        "parameter":
+        {
+            "GridStep_range": 160,
+            "GridStep_azimut":160,
+            "Grid_Threshold": 0.3,
+            "Grid_Gap": 1000,
+            "Interferogram_gain": 0.1,
+	    "Activate_Interferogram": "yes",
+	    "ESD_iter": 2
+        }
+    },
+    "Post_Processing":
+    {
+        "parameter":
+        {
+            "Activate_Ortho": "yes",
+            "Spacingxy": 0.0001,
+	    "Activate_Filtering" : "yes",
+	    "Filtered_Interferogram_mlran" : 3,
+	    "Filtered_Interferogram_mlazi" : 3
+        }
+    }
+}
+
diff --git a/python_src/old/share/ex_config/ex_config_diapOTB_Cosmo.json b/python_src/old/share/ex_config/ex_config_diapOTB_Cosmo.json
new file mode 100644
index 0000000000000000000000000000000000000000..47f2226446639e6c3827b8c458408b1c9bd7f242
--- /dev/null
+++ b/python_src/old/share/ex_config/ex_config_diapOTB_Cosmo.json
@@ -0,0 +1,66 @@
+{
+    "Global": {
+	"in": 
+	{
+	    "Master_Image_Path": "image_1.tif",
+	    "Slave_Image_Path": "image_2.tif",
+	    "DEM_Path": "./DEM.hgt"
+	},
+	"out": 
+	{
+	    "output_dir": "./output_diapOTB"
+	},
+	"parameter":
+	{
+	    "optram" : 256
+	}
+    },
+    
+    "Pre_Processing": {
+	"out": 
+	{
+	    "doppler_file": "dop0.txt"
+	},
+	"parameter":
+	{
+	    "ML_range": 3,
+	    "ML_azimut": 3,
+	    "ML_gain": 0.1
+	}
+    },
+    "Metadata_Correction": 
+    {
+	"out": 
+	{
+	    "fine_metadata_file": "fine_metadata.txt"
+	},
+	"parameter":
+	{
+	    "activate": false,
+	    "GridStep_range": 150,
+	    "GridStep_azimut": 150
+	}
+    },
+    "DIn_SAR": 
+    {
+	"parameter":
+	{
+	    "GridStep_range": 150,
+	    "GridStep_azimut": 150,
+	    "Grid_Threshold": 0.3,
+	    "Grid_Gap": 3000,
+	    "Interferogram_gain": 0.1
+	}
+    },
+    "Post_Processing":
+    {
+        "parameter":
+        {
+            "Activate_Ortho": "yes",
+            "Spacingxy": 0.0001,
+	    "Activate_Filtering" : "yes",
+	    "Filtered_Interferogram_mlran" : 3,
+	    "Filtered_Interferogram_mlazi" : 3
+        }
+    }
+}
diff --git a/python_src/old/share/ex_config/ex_config_diapOTB_S1IW.json b/python_src/old/share/ex_config/ex_config_diapOTB_S1IW.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb2b65daafa7ce0a9ecdb268edecba799036468d
--- /dev/null
+++ b/python_src/old/share/ex_config/ex_config_diapOTB_S1IW.json
@@ -0,0 +1,56 @@
+{
+    "Global": {
+	"in": 
+	{
+	    "Master_Image_Path": "image_1.tif",
+	    "Slave_Image_Path": "image_2.tif",
+	    "DEM_Path": "./DEM.hgt"
+	},
+	"out": 
+	{
+	    "output_dir": "./output_diapOTB"
+	},
+	"parameter":
+	{
+	    "burst_index": "0-8",
+	    "optram" : 256
+	}
+    },
+    
+    "Pre_Processing": {
+	"out": 
+	{
+	    "doppler_file": "dop0.txt"
+	},
+	"parameter":
+	{
+	    "ML_range": 8,
+	    "ML_azimut": 2,
+	    "ML_gain": 0.2
+	}
+    },
+    "Ground": {},
+    "DIn_SAR": 
+    {
+	"parameter":
+	{
+	    "GridStep_range": 160,
+	    "GridStep_azimut": 160,
+	    "Grid_Threshold": 0.3,
+	    "Grid_Gap": 1000,
+	    "Interferogram_gain": 0.1,
+	    "ESD_iter": 2
+	}
+    },
+    "Post_Processing":
+    {
+        "parameter":
+        {
+            "Activate_Ortho": "yes",
+            "Spacingxy": 0.0001,
+	    "Activate_Filtering" : "yes",
+	    "Filtered_Interferogram_mlran" : 3,
+	    "Filtered_Interferogram_mlazi" : 3
+        }
+    }
+}
diff --git a/python_src/old/share/ex_config/ex_config_diapOTB_S1SM.json b/python_src/old/share/ex_config/ex_config_diapOTB_S1SM.json
new file mode 100644
index 0000000000000000000000000000000000000000..04993f703127edb8ff06c510487c5d6dfb19203a
--- /dev/null
+++ b/python_src/old/share/ex_config/ex_config_diapOTB_S1SM.json
@@ -0,0 +1,67 @@
+{
+    "Global": {
+	"in": 
+	{
+	    "Master_Image_Path": "image_1.tif",
+	    "Slave_Image_Path": "image_2.tif",
+	    "DEM_Path": "./DEM.hgt"
+	},
+	"out": 
+	{
+	    "output_dir": "./output_diapOTB"
+	},
+	"parameter":
+	{
+	    "optram" : 256
+	}
+
+    },
+    
+    "Pre_Processing": {
+	"out": 
+	{
+	    "doppler_file": "dop0.txt"
+	},
+	"parameter":
+	{
+	    "ML_range": 3,
+	    "ML_azimut": 3,
+	    "ML_gain": 0.1
+	}
+    },
+    "Metadata_Correction": 
+    {
+	"out": 
+	{
+	    "fine_metadata_file": "fine_metadata.txt"
+	},
+	"parameter":
+	{
+	    "activate": false,
+	    "GridStep_range": 150,
+	    "GridStep_azimut": 150
+	}
+    },
+    "DIn_SAR": 
+    {
+	"parameter":
+	{
+	    "GridStep_range": 150,
+	    "GridStep_azimut": 150,
+	    "Grid_Threshold": 0.3,
+	    "Grid_Gap": 1000,
+	    "Interferogram_gain": 0.1
+	}
+    },
+    "Post_Processing":
+    {
+        "parameter":
+        {
+            "Activate_Ortho": "yes",
+            "Spacingxy": 0.0001,
+	    "Activate_Filtering" : "yes",
+	    "Filtered_Interferogram_mlran" : 3,
+	    "Filtered_Interferogram_mlazi" : 3
+        }
+    }
+}
diff --git a/python_src/utils/DiapOTB_applications.py b/python_src/old/utils/DiapOTB_applications.py
similarity index 100%
rename from python_src/utils/DiapOTB_applications.py
rename to python_src/old/utils/DiapOTB_applications.py
diff --git a/python_src/old/utils/__init__.py b/python_src/old/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python_src/old/utils/addGCP.py b/python_src/old/utils/addGCP.py
new file mode 100644
index 0000000000000000000000000000000000000000..913e999cf63dd2fe768c6b53a85726679759ddaf
--- /dev/null
+++ b/python_src/old/utils/addGCP.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+""" 
+    addGCP.py
+    ==========
+
+    Python script to add into a tiff file, GCPs information
+ 
+"""
+
+import argparse
+import re
+
+try :
+    import gdal
+    import osr
+except ImportError :
+    import osgeo.gdal as gdal
+    import osgeo.osr as osr
+
+
+# addGCP function
+def addGCP(dictKWL, inTiff):
+    """ 
+    Function to add into a tiff file, GCPs information
+    """
+
+    ###### Create GCPs list from input dict ######
+    # Select keys with gcp in it
+    filtered_dictKWL = {k:v for k,v in dictKWL.items() if "gcp" in k}
+
+    # Create 5 lists : for columns (pixel), lines, lon, lat and hgt
+    col_list = [float(filtered_dictKWL[k])
+                for k in filtered_dictKWL.keys() if "im_pt.x" in k]
+    line_list = [float(filtered_dictKWL[k])
+                 for k in filtered_dictKWL.keys() if "im_pt.y" in k]
+    lon_list = [float(filtered_dictKWL[k])
+                for k in filtered_dictKWL.keys() if "world_pt.lon" in k]
+    lat_list = [float(filtered_dictKWL[k])
+                for k in filtered_dictKWL.keys() if "world_pt.lat" in k]
+    hgt_list = [float(filtered_dictKWL[k])
+                for k in filtered_dictKWL.keys() if "world_pt.hgt" in k]
+    
+    gcp_number = int(filtered_dictKWL['support_data.geom.gcp.number'])
+
+    # Check list size
+    if len(col_list) != gcp_number or len(line_list) != gcp_number or \
+       len(lon_list) != gcp_number or len(lat_list) != gcp_number or \
+       len(hgt_list) != gcp_number :
+        print("Wrong size for gcp lists ")
+        quit()
+
+    gcp_list = []
+    for i in range(0, gcp_number):
+        gcp = gdal.GCP(lon_list[i], lat_list[i], hgt_list[i],
+                       col_list[i], line_list[i]) # lon, lat, hgt, col, line
+        gcp_list.append(gcp)
+
+    ###### Add GCPs into the input tiff ######
+    ds = gdal.Open(inTiff, gdal.GA_Update)
+
+    # First : Adapt Projection for WGS84
+    wkt = ds.GetProjection()
+
+    # if no projection defined => projection by default (EPSG 4326)
+    if not wkt :
+        sr = osr.SpatialReference()
+        sr.ImportFromEPSG(4326) # EPSG : 4326 = WGS84
+        wkt = sr.ExportToWkt()
+
+    # Set GCPs into tiff image
+    ds.SetGCPs(gcp_list, wkt)
+        
+    ds = None
+    
+# Main
+if __name__ == "__main__":
+
+    ###### Get the main argument : geom file and input tiff ######
+    # Check arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("geomfile", help="input geom file with correct GCPs")
+    parser.add_argument("intiff", help="input tiff image to include GCPs in it")
+    args = parser.parse_args()
+    
+    ###### Read geom file ######
+    f_geom =open(args.geomfile, "r")
+    keywordlist = f_geom.read().split("\n") 
+    keywordlist = filter(None, keywordlist)
+    # Transform file content to dict
+    dictKWL = { i.split(':')[0] : re.sub(r"[\n\t\s]*", "", i.split(':')[1]) for i in keywordlist }
+
+    addGCP(dictKWL, args.intiff)
diff --git a/python_src/old/utils/compute_spectra_ImageTiff.py b/python_src/old/utils/compute_spectra_ImageTiff.py
new file mode 100755
index 0000000000000000000000000000000000000000..fb044094762c5325de9333cdadf6c682076f007a
--- /dev/null
+++ b/python_src/old/utils/compute_spectra_ImageTiff.py
@@ -0,0 +1,109 @@
+# coding:utf-8
+import sys
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+import argparse
+try:
+    import gdal
+except ImportError:
+    import osgeo.gdal as gdal
+def calcul_spectres_tiff(ficima,output_path, output_filename_az, output_filename_dist):
+    """
+    Compute the average spectra in range and in azimuth from a complex SAR image (mandatory format .tif)
+    """
+    # Average spectrum in azimut
+    ###################################
+
+    print("Compute the average spectrum in azimut after loading the modules python and gdal")
+    #Read the image tiff
+    zone = read_tiff(ficima)
+    print ("Dimensions of the tif file ",np.shape(zone))
+    (nb_lig, nb_col) = np.shape(zone[:,:,0])
+    print ("nb lig = ",nb_lig," nb col = ", nb_col)
+
+    lig = np.zeros(nb_lig, dtype = complex)
+    spectre_az = np.zeros(nb_lig, dtype = 'float')
+
+    for i in range(nb_col):
+        lig = zone[:,i,0] + 1j*zone[:,i,1] #All the lines in complex
+        spectre_az = spectre_az + np.abs(np.fft.fft(lig))**2
+
+    spectre_az = np.sqrt(spectre_az) / float(nb_col)
+
+    #Display and Save the spectrum
+    display_spectrum(spectre_az, "Average spectrum in azimut", output_path, output_filename_az)
+
+    # Compute the average spectrum in range
+    #####################################
+
+    print("Compute the average spectrum in range")
+    col = np.zeros(nb_col, dtype = complex)
+    spectre_dist = np.zeros(nb_col, dtype = 'float')
+
+    for i in range(nb_lig):
+        col = zone[i,:,0] + 1j*zone[i,:,1] #All the columns in complex
+        spectre_dist = spectre_dist + np.abs(np.fft.fft(col))**2
+
+    spectre_dist = np.sqrt(spectre_dist) / float(nb_lig)
+
+    #Display and Save the spectrum
+    print("Display and Save the spectrum")
+    display_spectrum(spectre_dist, "Average spectrum in range", output_path, output_filename_dist)
+
+    plt.show()
+
+def read_tiff(filename):
+    """
+    Create a numpy array from the input image tiff.
+    """
+    ds = gdal.Open(filename)
+
+    nb_lig = ds.RasterYSize
+    nb_col = ds.RasterXSize
+    nb_bands = ds.RasterCount
+
+    zone = np.zeros((nb_lig, nb_col, nb_bands))
+
+    for i in range(nb_bands):
+        zone[:,:,i] = np.array(ds.GetRasterBand(i+1).ReadAsArray())
+
+    return zone
+
+def display_spectrum(spectre, title, path, filename):
+    """
+    Display the normalized spectrum.
+    """
+    nb_val = np.size(spectre)
+    xvals = np.arange(nb_val) / float(nb_val)
+
+    plt.figure()
+    plt.plot(xvals, spectre)
+    plt.ylim(0, 1.1*max(spectre))
+    plt.title(title + "\n" + filename)
+    plt.savefig(os.path.join(path,filename))
+    plt.draw()
+
+
+if __name__ == "__main__":
+    description = "Compute of the spectra of the image tif"
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument("path_input", type=str, help="Path of the input Tiff Image to get its spectrum. There is not its extension in the path")
+    parser.add_argument("path_output_dir", type=str, help="Path of the output directory where the spectra of the input will be saved")
+    args = parser.parse_args()
+
+
+    print("Usage : python compute_spectra_ImageTiff.py <path_input_tif_image without the extension file> <path_output_directory>")
+    print("The Tiff Image must have 2 bands in float/int. The script cannot read the image with one band in complex")
+    path_input = args.path_input
+    file_input = path_input.split('/')[-1]
+    input_path = os.path.dirname(path_input)
+    if(input_path==''):
+        input_path = '.'
+    nomima = file_input.split('.')[0]
+    ficima=os.path.join(input_path,nomima+".tif")
+    output_path=args.path_output_dir
+    output_filename_az="spectrum_az_"+nomima
+    output_filename_dist="spectrum_dist_"+nomima
+
+    calcul_spectres_tiff(ficima,output_path, output_filename_az, output_filename_dist)
diff --git a/python_src/utils/func_utils.py b/python_src/old/utils/func_utils.py
similarity index 100%
rename from python_src/utils/func_utils.py
rename to python_src/old/utils/func_utils.py
diff --git a/python_src/old/utils/generateConfigFile.py b/python_src/old/utils/generateConfigFile.py
new file mode 100644
index 0000000000000000000000000000000000000000..141d281e18fff59385dfd3dd756006832e9d99dc
--- /dev/null
+++ b/python_src/old/utils/generateConfigFile.py
@@ -0,0 +1,403 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+""" 
+    generateConfigFile.py
+    =====================
+
+    Python script to generate configuration file (json format) with default parameters and user's paths
+ 
+"""
+
+import os
+import sys
+import re
+import json
+import readline, glob
+
+import func_utils
+
+# Util fct for autocompletion
+def complete(text, state):
+    """ 
+    Function for autocompletion
+    """
+    return (glob.glob(text+'*')+[None])[state]
+
+# Print with colors
+def prRed(skk): print("\033[91m {}\033[00m" .format(skk)) 
+def prGreen(skk): print("\033[92m {}\033[00m" .format(skk)) 
+def prYellow(skk): print("\033[93m {}\033[00m" .format(skk)) 
+def prLightPurple(skk): print("\033[94m {}\033[00m" .format(skk)) 
+def prPurple(skk): print("\033[95m {}\033[00m" .format(skk)) 
+def prCyan(skk): print("\033[96m {}\033[00m" .format(skk)) 
+def prLightGray(skk): print("\033[97m {}\033[00m" .format(skk)) 
+def prBlack(skk): print("\033[98m {}\033[00m" .format(skk)) 
+
+# Input with colors
+def genericInput(skk, color) :
+    """ 
+    Override input function for colors and exceptions
+    """
+    try:
+        return input(color .format(skk)) 
+    except KeyboardInterrupt :
+        prRed("Generation Aborted, Ctrl-C detected")
+        sys.exit(1)
+    except :
+        prRed("Generation Aborted")
+        sys.exit(1)
+
+def inRed(skk): return genericInput(skk, "\033[91m {}\033[00m")
+def inGreen(skk): return genericInput(skk, "\033[92m {}\033[00m") 
+def inYellow(skk): return genericInput(skk, "\033[93m {}\033[00m") 
+def inLightPurple(skk): return genericInput(skk, "\033[94m {}\033[00m") 
+def inPurple(skk): return genericInput(skk, "\033[95m {}\033[00m") 
+def inCyan(skk): return genericInput(skk, "\033[96m {}\033[00m") 
+def inLightGray(skk): return genericInput(skk, "\033[97m {}\033[00m") 
+def inBlack(skk): return genericInput(skk, "\033[98m {}\033[00m") 
+
+
+# Questions to user for SAR_MultiSlc* chains
+def askForMultiSlc(dataConfig) :
+    """ 
+    Q&A for SAR_MultiSlc* chains
+    Modify the dataConfig following user's answers
+    """
+
+    # Select sensor if SAR_MultiSlc
+    sensor = "S1IW"
+    if (response == "SAR_MultiSlc") :
+        sensor = inLightPurple("Please, select the wanted sensor S1SM " \
+                       "(for Sentinel-1 StripMap mode) or Cosmo " \
+                       "(for Cosmo-Skymed Spotligth and StriMap mode) : " )
+
+        if sensor not in ["S1SM", "Cosmo"] :
+            prRed("Unknown sensor, please choose between S1SM or Cosmo")
+            quit()
+
+    # SRTM_Shapefile
+    SRTM_Shapefile = os.path.realpath(inPurple("Please, enter your path to srtm shp : " ))
+    func_utils.check_ifExist(SRTM_Shapefile)
+
+    # SRTM_Path
+    SRTM_Path = os.path.realpath(inPurple("Please, enter your path to srtm hgt files : " ))
+    func_utils.check_ifDir(SRTM_Path)
+
+    # Input/Output Paths
+    Input_Path = os.path.realpath(inPurple("Please, enter your path to input images : " ))
+    func_utils.check_ifDir(Input_Path)
+
+    Output_Path = os.path.realpath(inPurple("Where would you like to store the output results : " ))
+    #func_utils.check_ifDir(os.path.dirname(Output_Path))
+    func_utils.check_ifDir(Output_Path)
+
+    # reference image (must be into Input_Path)
+    reference_image = inPurple("Which image is your reference : " )
+    func_utils.check_ifExist(reference_image)
+    reference_image = os.path.basename(reference_image)
+
+    if not func_utils.get_imgFromDir(reference_image, Input_Path) :
+        prRed(reference_image + " not found into given input path " + \
+              "Input_Path")
+        prRed("Please check your input path")
+        quit()
+    else :
+        correct = func_utils.check_image_pattern(reference_image, mode=sensor)
+        if not correct : 
+            prRed("Reference image " + reference_image + " does not respect naming conventions for the " \
+                  "selected sensor")
+            quit()
+
+    # Geoid file
+    res_geoid = inPurple("Would you like to add a geoid file (yes/no) : ")
+    Geoid = None
+
+    if res_geoid == "yes" :
+        Geoid = os.path.realpath(inLightPurple("Please, enter your path to your geoid file : "))
+        func_utils.check_ifExist(Geoid)
+    else :
+        Geoid = os.getenv('OTB_GEOID_FILE')
+        if not Geoid :
+            prRed("Undefined geoid (empty OTB_GEOID_FILE environnement variable)")
+            inPurple("Please indicate a geoid file by setting path or with OTB_GEOID_FILE environnement variable")
+            quit()
+            
+    for retry in range(1,6):
+        # Start/End date for image selection (5 retries for this selection)
+        res_date = inLightPurple("Would you like to specify a start and end date for image selection (yes/no) : ")
+
+        # Dummy dates to select by default all images into Input_Path 
+        start_date = "19000101"
+        end_date = "29000101"
+        pattern = "".join(['\d{8}'])
+        if res_date == "yes" :
+            start_date = inPurple("Please, indicate a start date with YYYYMMDD format : ")
+            if not re.match(pattern, start_date) :
+                prRed("start_date " + start_date + " does not respect the expected format YYYYMMDD")
+                quit()
+
+            end_date = inPurple("Please, indicate a end date with YYYYMMDD format : ")
+            if not re.match(pattern, end_date) :
+                prRed("end_date " + end_date + " does not respect the expected format YYYYMMDD")
+                quit()
+
+        # Indicate to user, all selected images with given dates, polarisation and input_path
+        ext = "tiff"
+        pol = ""
+        iw = ""
+        if sensor == "Cosmo" :
+            ext = "h5"
+            pol = reference_image.split("_")[5]
+        else :
+            pol = reference_image.split("-")[3]
+
+        if sensor == "S1IW" :
+            iw = reference_image.split("-")[1]
+            ext = ""
+
+        exclude = "-9999"
+        tiff_list, throw_warning = func_utils.get_AllTiff(pol=pol, ext=ext, iw=iw, searchDir=Input_Path)
+        tiff_dates = func_utils.get_Tiff_WithDates(int(start_date), int(end_date), exclude, tiff_list, ext)
+
+        # Avoid duplicates 
+        tiff_dates = func_utils.avoidDuplicates(tiff_dates)
+        tiff_dates.remove(reference_image)
+
+        prYellow("For your information, the selected images for processings will be : ")
+        prYellow("As reference : " + reference_image)
+        prYellow("As secondaries : " + str(tiff_dates))
+
+        # Ask to continue if selection OK
+        res_continue = inLightPurple("Do you agree to continue with this selection (yes/no/exit) : ")
+
+        if res_continue == "exit" :
+            prRed("You choose to exit, you can relaunch this script with new inputs")
+            quit()
+
+        if res_continue != "yes" :
+            if retry < 5 :
+                prRed("Previous selection does not fullfill your expectations, please select with different dates")
+            else :
+                prRed("Previous selection does not fullfill your expectations with too many retries.\n You can relaunch this script with new inputs")
+                quit()
+        else :
+            break
+
+    # EOF file
+    EOF_Path = None
+    if sensor != "Cosmo" :
+        res_eof = inLightPurple("Would you like to indicate fine orbits (yes/no) : ")
+
+        if res_eof == "yes" : 
+            EOF_Path = os.path.realpath(inPurple("Please, enter your path to .EOF files : " ))
+            func_utils.check_ifDir(EOF_Path)
+
+
+    # Fill with user's response our generic fields for SAR_MultiSlc* chains
+    dataConfig['Global']['in']['SRTM_Shapefile'] = SRTM_Shapefile
+    dataConfig['Global']['in']['SRTM_Path'] = SRTM_Path
+    dataConfig['Global']['in']['Input_Path'] = Input_Path
+    dataConfig['Global']['in']['Master_Image'] = reference_image
+    dataConfig['Global']['in']['Start_Date'] = start_date
+    dataConfig['Global']['in']['End_Date'] = end_date
+    if Geoid :
+        dataConfig['Global']['in']['Geoid'] = Geoid
+    else :
+        del dataConfig['Global']['in']['Geoid']
+    if EOF_Path :
+        dataConfig['Global']['in']['EOF_Path'] = EOF_Path
+
+    dataConfig['Global']['out']['Output_Path'] = Output_Path    
+
+
+# Questions to user for diapOTB* chains
+def askForDiapOTB(dataConfig) :
+    """ 
+    Q&A for diapOTB* chains
+    Modify the dataConfig following user's answers
+    """
+
+    # Select sensor if diapOTB
+    sensor = "S1IW"
+    if (response == "diapOTB") :
+        sensor = inLightPurple("Please, select the wanted sensor S1SM " \
+                       "(for Sentinel-1 StripMap mode) or Cosmo " \
+                       "(for Cosmo-Skymed Spotligth and StriMap mode) : " )
+
+        if sensor not in ["S1SM", "Cosmo"] :
+            prRed("Unknown sensor, please choose between S1SM or Cosmo")
+            quit()
+
+        
+    # reference image (path to image)
+    reference_image = os.path.realpath(inPurple("Which image is your reference : " ))
+    func_utils.check_ifExist(reference_image)
+    reference_image_base = os.path.basename(reference_image)
+
+    correct = func_utils.check_image_pattern(reference_image_base, mode=sensor)
+    if not correct : 
+        prRed("Reference image " + reference_image_base + " does not respect naming conventions for the " \
+              "selected sensor")
+        quit()
+
+    # reference image (path to image)
+    secondary_image = os.path.realpath(inPurple("Which image is secondary : " ))
+    func_utils.check_ifExist(secondary_image)
+    secondary_image_base = os.path.basename(secondary_image)
+
+    correct = func_utils.check_image_pattern(secondary_image_base, mode=sensor)
+    if not correct : 
+        prRed("Reference image " + secondary_image_base + " does not respect naming conventions for the " \
+              "selected sensor")
+        quit()
+
+
+    # DEM Path
+    DEM_Path = os.path.realpath(inPurple("Please, enter your path to your DEM : "))
+    func_utils.check_ifExist(DEM_Path)
+
+    # Output Path
+    Output_Path = os.path.realpath(inPurple("Where would you like to store the output results : " ))
+    func_utils.check_ifDir(os.path.dirname(Output_Path))
+
+    # EOF file
+    EOF_Path = None
+    if sensor != "Cosmo" :
+        res_eof = inLightPurple("Would you like to indicate fine orbits (yes/no) : ")
+
+        if res_eof == "yes" : 
+            EOF_Path = os.path.realpath(inPurple("Please, enter your path to .EOF files : " ))
+            func_utils.check_ifDir(EOF_Path)
+
+
+    # Fill with user's response our generic fields for SAR_MultiSlc* chains
+    dataConfig['Global']['in']['Master_Image_Path'] = reference_image
+    dataConfig['Global']['in']['Slave_Image_Path'] = secondary_image
+    dataConfig['Global']['in']['DEM_Path'] = DEM_Path
+    if EOF_Path :
+        dataConfig['Global']['in']['EOF_Path'] = EOF_Path
+
+    dataConfig['Global']['out']['output_dir'] = Output_Path   
+
+
+
+###################
+###### Main #######
+###################
+if __name__ == "__main__":
+    
+    ######### Introduction prints #########
+    prCyan("Welcome to DiapOTB remote module !")
+    prCyan("You can generate configuration files for the four available processing chains : diapOTB," \
+          "diapOTB_S1IW, SAR_MultiSlc and SAR_MultiSlc_IW")
+    
+    ######### Load the example for prepare the configuration file according to user's choice #########
+    # First choice for user : the selected chain
+    response = inLightGray("Please, choose your processing chain (diapOTB, diapOTB_S1IW, SAR_MultiSlc and " \
+                     "SAR_MultiSlc_IW) : ")
+    
+    if response not in ['diapOTB', 'diapOTB_S1IW', 'SAR_MultiSlc', 'SAR_MultiSlc_IW'] : 
+        prRed("Wrong chain, please choose between available chains")
+        quit()
+        
+    
+    # Load examples according to the selected chain => Init a dictionary with default parameters
+    current_path = os.path.dirname(os.path.realpath(__file__))
+    ex_config_path = os.path.join(current_path, "../ex_config")
+   
+    dataConfig = {}
+    if response == "diapOTB" :
+        
+        ex_confile = os.path.join(ex_config_path, "ex_config_diapOTB_Cosmo.json") 
+        dataConfig = func_utils.load_configfile(ex_confile, "S1_SM")
+    
+    elif response == "diapOTB_S1IW" :
+
+        ex_confile = os.path.join(ex_config_path, "ex_config_diapOTB_S1IW.json") 
+        dataConfig = func_utils.load_configfile(ex_confile, "S1_IW")
+    
+    elif response == "SAR_MultiSlc" :
+
+        ex_confile = os.path.join(ex_config_path, "ex_config_MultiSlc_CosmoS1SM.json")
+        dataConfig = func_utils.load_configfile(ex_confile, "multi_S1")
+    
+    elif response == "SAR_MultiSlc_IW" :
+
+        ex_confile = os.path.join(ex_config_path, "ex_config_MultiSlc_IW.json")
+        dataConfig = func_utils.load_configfile(ex_confile, "multi_SW")
+
+
+
+    ########## Prepare for questions : with autocompletion #########
+    readline.set_completer_delims(' \t\n;')
+    readline.parse_and_bind("tab: complete")
+    readline.set_completer(complete)
+    
+    ########## Ask to user for generic fields #########
+    # SAR_MultiSlc* chains
+    if (response == "SAR_MultiSlc" or response == "SAR_MultiSlc_IW") : 
+        try:
+            askForMultiSlc(dataConfig)
+        except :
+            prRed("Generation Aborted")
+            sys.exit(1)
+
+    if (response == "diapOTB" or response == "diapOTB_S1IW") : 
+        try : 
+            askForDiapOTB(dataConfig)
+        except :
+            prRed("Generation Aborted")
+            sys.exit(1)
+
+    # Dump dataConfig with the new fields and the default parameters
+    res_json = os.path.realpath(inLightGray("Where do you want store your configuration file : "))
+    
+    # if directory
+    if os.path.isdir(res_json) :
+        res_json_name = inLightGray("Please, enter a name for your configuration " \
+                                    "file (with .json extension) : ")
+        
+        if os.path.exists(os.path.join(res_json, res_json_name)) :
+            res_json_overwrite = inLightGray("Would you like to overwrite the file " + 
+                                             res_json_name + " (yes/no) : ")
+            
+            if res_json_overwrite == "yes" :
+                with open(os.path.join(res_json, res_json_name), 'w') as f:
+                    json.dump(dataConfig, f, indent=2, sort_keys=False)
+            else :
+                prRed("Generation Aborted")
+                quit()
+
+        else :
+            with open(os.path.join(res_json, res_json_name), 'w') as f:
+                json.dump(dataConfig, f, indent=2, sort_keys=False)
+                
+    # If file (or wrong path)
+    else :
+        if os.path.isdir(os.path.dirname(res_json)) :
+            
+            if os.path.exists(res_json) :
+                res_json_overwrite = inLightGray("Would you like to overwrite the file " + 
+                                                 res_json + " (yes/no) : ")
+            
+                if res_json_overwrite == "yes":
+                    with open(res_json, 'w') as f:
+                        json.dump(dataConfig, f, indent=2, sort_keys=False)
+                else :
+                    prRed("Generation Aborted")
+                    quit()
+            else :
+                with open(res_json, 'w') as f:
+                    json.dump(dataConfig, f, indent=2, sort_keys=False)
+
+        else :
+            prRed("Wrong path for the configuration file, Generation Aborted")
+            
+
+    ######### Conclusion prints #########
+    prGreen("The configuration file was generated !")
+    prCyan("You can modify the parameters in it and launch the processing chain with this file as only argument")
+    prCyan("You can find further information on https://gitlab.orfeo-toolbox.org/remote_modules/diapotb/-/wikis/")
+        
+        
diff --git a/python_src/old/utils/getEOFFromESA.py b/python_src/old/utils/getEOFFromESA.py
new file mode 100644
index 0000000000000000000000000000000000000000..f575d150deb48c6a71020391c718827542bbb431
--- /dev/null
+++ b/python_src/old/utils/getEOFFromESA.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+""" 
+    getEOFFromESA.py
+    ================
+
+    Python script to retrieve from ESA website, the EOF files
+ 
+"""
+
+import re
+import os
+import time
+import requests
+import shutil
+import argparse
+import datetime
+import xml.etree.ElementTree
+
def searchForSafe(input_dir):
    """
    Search for SAFE repositories (directories ending with ".SAFE") under input_dir.

    :input_dir: (str) input directory, walked recursively
    :returns: (list) names of the SAFE directories found
    """
    list_SAFE = []
    for _root, dirs, _files in os.walk(input_dir):
        # Keep only directory names carrying the Sentinel-1 ".SAFE" extension
        # (the original looped over a redundant generator "(i for i in dirs)")
        list_SAFE.extend(d for d in dirs if d.endswith(".SAFE"))

    return list_SAFE
+
# Get url for orbits files
def get_url_orbs(orbs_type, sensor, year, month, day,
                 url="https://aux.sentinel1.eo.esa.int/",
                 orb_list=None):
    """
    Get the set of available orbit file urls from the ESA auxiliary data server.

    :orbs_type: (str) either POEORB or RESORB (None searches the POEORB date offsets)
    :sensor: (str) either S1A or S1B
    :year: (str|int) year
    :month: (str|int) month (as a 2 digit number)
    :day: (str|int) day as a two digit number in the month
    :url: (str) base url where to gather the data
    :orb_list: (list|None) if given, found urls are appended to it in place
    :returns: (list|None) the found urls when orb_list is None, otherwise None
              (results are accumulated into orb_list); [] on request failure
    """
    url = url.rstrip('/')
    month = "{:02d}".format(int(month))
    day = "{:02d}".format(int(day))

    # Pattern to find EOF file names inside the HTML listing returned by the server
    pattern = '"{}_.*?_{}_.*?.EOF"'.format(sensor, orbs_type)

    orb_list_tmp = []

    tacquisition0 = datetime.datetime(int(year), int(month), int(day))

    # Day offsets (from the acquisition date) where the orbit file may be published
    list_of_days_tosearch = []
    # Fetch POEORB if orbs_type is POEORB or None (ie, first try with Precise orbits)
    # This kind of orbit is restituted around 21 days later
    if orbs_type == 'POEORB' or orbs_type is None:
        list_of_days_tosearch = [21, 20, 22, 19, 23]
    # Fetch RESORB if orbs_type is RESORB
    # This kind of orbit is restituted around 3 hours later (day before to day after safe name)
    if orbs_type == "RESORB":
        list_of_days_tosearch = [0, 1, 2]

    if not url.startswith("https"):
        raise RuntimeError("malformed url or too recent date, should start by https://")

    # NOTE: "day_offset" avoids shadowing the "day" parameter (the original reused "day")
    for day_offset in list_of_days_tosearch:

        tacquisition = tacquisition0 + datetime.timedelta(days=day_offset)

        # Build the url with the "ESA format": <base>/<type>/<year>/<month>/<day>/
        url_orb = "{}/{}/{}/{:02d}/{:02d}/".format(url, orbs_type, tacquisition.year,
                                                   tacquisition.month, tacquisition.day)

        try:
            # Make the get request with verify=False to avoid ssl issues
            response = requests.get(url_orb, verify=False)

            if response.status_code != 200 and response.status_code != 201:
                print("problem with the url {} ".format(url_orb))
                continue

            # url_orb already ends with '/': do not insert another one (the
            # original produced ".../<day>//<file>.EOF")
            orb_list_tmp += list(set([url_orb + x.strip().strip('"')
                                      for x in re.findall(pattern, response.text)]))

        except Exception as e:
            print("exception for url {}: {}".format(url_orb, e))
            return []

    # Accumulate into the caller-provided list, or return the result directly.
    # (The original did "orb_list += orb_list_tmp" unconditionally, which raised
    # TypeError when orb_list was left at its None default.)
    if orb_list is None:
        return orb_list_tmp
    orb_list += orb_list_tmp
+
+
# Get contents of orbit files on disk
def get_orb_content_ondisk(orb_list, output_dir):
    """
    Retrieve on disk the set of selected orbit files.

    :orb_list: (list) list of urls (to get orbit files)
    :output_dir: (str) output directory (on disk) to put orbit files
    :returns: None (files are written into output_dir, one per url)
    """
    for orb in orb_list:
        try:
            # Get the content of the EOF file (stream=True to avoid loading it in memory)
            response = requests.get(orb, verify=False, stream=True)

            if response.status_code != 200 and response.status_code != 201:
                print("problem with the url {} ".format(orb))
                continue

            # Copy the content on disk, keeping the remote file name
            with open(os.path.join(output_dir, os.path.basename(orb)), 'wb') as out_file:
                shutil.copyfileobj(response.raw, out_file)

            del response
        except Exception as e:
            # Report the failing url together with the exception (the original
            # formatted the exception into the "url {}" slot, losing the url)
            print("exception for url {}: {}".format(orb, e))
+
+
###################
###### Main #######
###################
if __name__ == "__main__":

    ###### Get the main arguments: indir and outdir ######
    # Check arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("indir", help="input directory to search SAFE repository")
    parser.add_argument("outdir", help="output directory to store EOF files")
    parser.add_argument("--type", "-t", type=str, choices=['POEORB', 'RESORB'],
                        default='POEORB',
                        help=("The type of data request. If set should be either POEORB or RESORB; "
                              "if not set test first POEORB and backoff to RESORB else"))
    args = parser.parse_args()

    # Check input and output directory (if exist)
    if not os.path.exists(args.indir):
        print(args.indir + " does not exists")
        quit()

    if not os.path.exists(args.outdir):
        print(args.outdir + " does not exists")
        quit()

    # Require a check of user for connexion parameters (proxy, ...)
    response = input("Check all your parameters for internet connexion (proxy, ssl ...), before continuing. \
    \nReady to continue (yes/no) :")

    if response != "yes":
        print("Set all your parameters for internet connexion to relaunch this script")
        quit()

    list_InSAFE = searchForSafe(args.indir)

    if len(list_InSAFE) == 0:
        print("None SAFE repository was found into {}".format(args.indir))
        quit()

    # For each SAFE found, retrieve URLs of EOF files (with date correspondance)
    orb_list = []

    for safeDir in list_InSAFE:

        # Tmp lists for the successive selection steps
        orb_list_firstSelect = []
        orb_list_secondSelect = []

        # Patterns for SAFE names (raw strings so \d is a regex class, not an escape)
        pattern_date = r"\d{8}T\d{6}"
        pattern_sensor = r"S1."
        pattern_orbit = r"_\d{6}_"
        dates = ""
        sensor = ""
        orbit_number = ""

        try:
            dates = re.findall(pattern_date, safeDir)
            sensor = re.findall(pattern_sensor, safeDir)
            orbit_number = int(re.findall(pattern_orbit, safeDir)[0][1:-1])
        except Exception as e:
            print("Safe name does not match with usual pattern and causes an exception : \
            {}".format(e))
            # Next safe
            continue

        if len(dates) == 2 and len(sensor) == 1:
            # First selection for S1A and S1B sensor at corresponding dates
            get_url_orbs(args.type, sensor[0], dates[0][0:4], dates[0][4:6],
                         dates[0][6:8], orb_list=orb_list_firstSelect)

            # Second selection with times: the orbit file validity interval must
            # overlap the acquisition interval taken from the SAFE name
            time_start = time.mktime(time.strptime(dates[0],
                                                   "%Y%m%dT%H%M%S"))

            time_end = time.mktime(time.strptime(dates[1],
                                                 "%Y%m%dT%H%M%S"))

            for orb in orb_list_firstSelect:
                eof_file = orb.split('/')[-1]

                # Without extension
                i_eof = eof_file.split(".EOF")[0]

                start_eofDate = i_eof.split('_')[-2]
                start_eofDate = start_eofDate.split("V")[1]
                end_eofDate = i_eof.split('_')[-1]

                # Convert the eof validity dates to epoch seconds for comparison
                time_start_eofDate = time.mktime(time.strptime(start_eofDate,
                                                               "%Y%m%dT%H%M%S"))

                time_end_eofDate = time.mktime(time.strptime(end_eofDate,
                                                             "%Y%m%dT%H%M%S"))

                # Compare dates and keep the eof file if it contains the current image dates
                if time_end > time_start_eofDate and time_start <= time_end_eofDate:
                    orb_list_secondSelect.append(orb)

            # Last selection/check with the absolute orbit number (Keep ????)
            for orb in orb_list_secondSelect:
                try:
                    # Make the get request with verify=False to avoid ssl issues
                    response = requests.get(orb, verify=False)

                    if response.status_code != 200 and response.status_code != 201:
                        # BUGFIX: the original printed "url_orb", undefined in this
                        # scope, raising a NameError swallowed by the except below
                        print("problem with the url {} ".format(orb))
                        continue

                    tree = xml.etree.ElementTree.fromstring(response.text)

                    Absolute_Orbit_Elt = tree.findall("Data_Block/List_of_OSVs/OSV/Absolute_Orbit")

                    orbitNumbers = list(set([int(x.text)
                                             for x in Absolute_Orbit_Elt]))

                    if orbit_number in orbitNumbers:
                        orb_list.append(orb)

                except Exception as e:
                    print("Exception {}".format(e))
                    quit()

        else:
            # Name the offending SAFE (the original reported args.indir instead)
            print("Wrong SAFE format for {}".format(safeDir))
            # Next Safe
            continue

    print("Selected orbit files (Ready to be retrieved on disk) : ")
    print(orb_list)

    # Get selection into output_dir
    get_orb_content_ondisk(orb_list, args.outdir)
+    
diff --git a/python_src/share/ex_conda_env.yml b/python_src/share/ex_conda_env.yml
old mode 100755
new mode 100644
diff --git a/python_src/share/json_schemas/schema_MultiSlc.json b/python_src/share/json_schemas/schema_MultiSlc.json
new file mode 100644
index 0000000000000000000000000000000000000000..70ec54ed45b974bb16e9eb5a563d8ecc5bf25d7b
--- /dev/null
+++ b/python_src/share/json_schemas/schema_MultiSlc.json
@@ -0,0 +1,165 @@
+{    
+    "$schema": "http://json-schema.org/schema#",
+    "title": "JSON SCHEMA for MultiSlc",
+    "description": "JSON organization for the script SAR_MultiSlc.py",
+
+    "type": "object",
+
+    "allOf": [{"required": ["Global", "Pre_Processing", "DIn_SAR",
+			   "Post_Processing"]}],
+
+    "properties":
+    {
+	"Global": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"in": 
+		{
+		    "type": "object", 
+		    "required": ["SRTM_Shapefile", "SRTM_Path", "Geoid", "Master_Image", "Start_Date", 
+				"End_Date", "Input_Path"],
+		    "additionalProperties": false,
+		    "properties": {"SRTM_Shapefile": {"type": "string"}, 
+				   "SRTM_Path": {"type": "string"},
+				   "EOF_Path": {"type": "string"},
+				   "Geoid": {"type": "string"},
+				   "Master_Image": {"type": "string"},
+				   "Start_Date": {"type": "string"},
+				   "End_Date": {"type": "string"},
+				   "Exclude": {"type": "string", "default": "-9999"},
+				   "Input_Path": {"type": "string"}}
+		}, 
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["Output_Path"],
+		    "additionalProperties": false,
+		    "properties": {"Output_Path": {"type": "string"}}
+		},
+		"sensor": 
+		{
+		    "type": "object",
+		    "required": ["satellite", "mode"],
+		    "additionalProperties": false,
+		    "properties": {"satellite": {"type": "string"},
+				   "mode": {"type": "string"}}
+		},
+		"parameter": 
+		{
+		    "type": "object",
+		    "additionalProperties": false,
+		    "properties": {"clean": {"type": "string", "default": "yes"},
+				  "optram": {"type": "number", "default": 4000}}
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["in", "out"]
+	},
+
+        "Pre_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["doppler_file"],
+		    "additionalProperties": false,
+		    "properties": {"doppler_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["ML_gain"],
+		    "additionalProperties": false,
+		    "properties": {"ML_ran": {"type": "number", "default" : 3},
+				   "ML_azi": {"type": "number", "default" : 3},
+				   "ML_gain": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+	
+        "Metadata_Correction": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["fine_metadata_file"],
+		    "additionalProperties": false,
+		    "properties": {"fine_metadata_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["activate", "GridStep_range", "GridStep_azimut"],
+		    "additionalProperties": false,
+		    "properties": {"activate": {"type": "boolean"},
+				   "GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+
+        "DIn_SAR": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["GridStep_range", "GridStep_azimut", "Grid_Threshold", "Grid_Gap", 
+				 "Interferogram_gain"],
+		    "additionalProperties": false,
+		    "properties": {"GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"},
+				   "Grid_Threshold": {"type": "number"},
+				   "Grid_Gap": {"type": "number"},
+				   "Interferogram_gain": {"type": "number"},
+				   "Interferogram_mlran": {"type": "number"},
+				   "Interferogram_mlazi": {"type": "number"},
+				   "Activate_Interferogram": {"type": "string", "default": "yes"},
+				   "roi": {"type": "string"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	},
+
+	 "Post_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["Activate_Ortho"],
+		    "additionalProperties": false,
+		    "properties": {"Activate_Ortho": {"type": "string"},
+				   "Spacingxy": {"type": "number", "default": 0.0001},
+				   "Activate_Filtering": {"type": "string"},
+				   "Filtered_Interferogram_mlran": {"type": "number"},
+				   "Filtered_Interferogram_mlazi": {"type": "number"},
+				   "Filtering_parameter_alpha": {"type": "number"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	}
+    }
+}
diff --git a/python_src/share/json_schemas/schema_MultiSlc_IW.json b/python_src/share/json_schemas/schema_MultiSlc_IW.json
new file mode 100644
index 0000000000000000000000000000000000000000..8120b9403cf8745ebc114683058a206e827932dc
--- /dev/null
+++ b/python_src/share/json_schemas/schema_MultiSlc_IW.json
@@ -0,0 +1,130 @@
+{    
+    "$schema": "http://json-schema.org/schema#",
+    "title": "JSON SCHEMA for MultiSlc",
+    "description": "JSON organization for the script SAR_MultiSlc_IW.py",
+
+    "type": "object",
+
+    "allOf": [{"required": ["Global", "Pre_Processing", "DIn_SAR", "Post_Processing"]}],
+
+    "properties":
+    {
+	"Global": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"in": 
+		{
+		    "type": "object", 
+		    "required": ["SRTM_Shapefile", "SRTM_Path", "Geoid", "Master_Image", "Start_Date", 
+				"End_Date", "Input_Path"],
+		    "additionalProperties": false,
+		    "properties": {"SRTM_Shapefile": {"type": "string"}, 
+				   "SRTM_Path": {"type": "string"},
+				   "Geoid": {"type": "string"},
+				   "EOF_Path": {"type": "string"},
+				   "Master_Image": {"type": "string"},
+				   "Start_Date": {"type": "string"},
+				   "End_Date": {"type": "string"},
+				   "Exclude": {"type": "string", "default": "-9999"},
+				   "Input_Path": {"type": "string"}}
+		}, 
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["Output_Path"],
+		    "additionalProperties": false,
+		    "properties": {"Output_Path": {"type": "string"}}
+		},
+		"parameter": 
+		{
+		    "type": "object",
+		    "additionalProperties": false,
+		    "properties": {"clean": {"type": "string", "default": "yes"},
+				   "burst_index": {"type": "string", "default": "0-8"},
+				   "optram": {"type": "number", "default": 4000},
+				   "tmpdir_into_outputdir": {"type": "string", "default": "no"}}
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["in", "out"]
+	},
+
+        "Pre_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["doppler_file"],
+		    "additionalProperties": false,
+		    "properties": {"doppler_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["ML_gain"],
+		    "additionalProperties": false,
+		    "properties": {"ML_ran": {"type": "number", "default" : 3},
+				   "ML_azi": {"type": "number", "default" : 3},
+				   "ML_gain": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+
+        "DIn_SAR": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["GridStep_range", "GridStep_azimut", "Grid_Threshold", "Grid_Gap", 
+				 "Interferogram_gain"],
+		    "additionalProperties": false,
+		    "properties": {"GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"},
+				   "Grid_Threshold": {"type": "number"},
+				   "Grid_Gap": {"type": "number"},
+				   "Interferogram_gain": {"type": "number"},
+				   "Activate_Interferogram": {"type": "string", "default": "yes"},
+				   "roi": {"type": "string"},
+				   "ESD_iter" : {"type": ["integer","string"]}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	},
+
+	 "Post_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["Activate_Ortho"],
+		    "additionalProperties": false,
+		    "properties": {"Activate_Ortho": {"type": "string"},
+				   "Spacingxy": {"type": "number", "default": 0.0001},
+				   "Activate_Filtering": {"type": "string"},
+				   "Filtered_Interferogram_mlran": {"type": "number"},
+				   "Filtered_Interferogram_mlazi": {"type": "number"},
+				   "Filtering_parameter_alpha": {"type": "number"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	}
+    }
+}
diff --git a/python_src/share/json_schemas/schema_S1IW.json b/python_src/share/json_schemas/schema_S1IW.json
new file mode 100644
index 0000000000000000000000000000000000000000..d6ea807b104e5406207b8a17371cbe2f267cb03b
--- /dev/null
+++ b/python_src/share/json_schemas/schema_S1IW.json
@@ -0,0 +1,122 @@
+{    
+    "$schema": "http://json-schema.org/schema#",
+    "title": "JSON SCHEMA for DiapOTB S1 IW chain",
+    "description": "JSON organization for the script diapOTB_S1IW.py",
+
+    "type": "object",
+
+    "allOf": [{"required": ["Global", "Pre_Processing", "Ground", "DIn_SAR", "Post_Processing"]}],
+
+    "properties":
+    {
+	"Global": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"in": 
+		{
+		    "type": "object", 
+		    "required": ["Master_Image_Path", "Slave_Image_Path", "DEM_Path"],
+		    "additionalProperties": false,
+		    "properties": {"Master_Image_Path": {"type": "string"}, 
+				  "Slave_Image_Path": {"type": "string"},
+				  "DEM_Path": {"type": "string"},
+				  "EOF_Path": {"type": "string"}}
+		}, 
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["output_dir"],
+		    "additionalProperties": false,
+		    "properties": {"output_dir": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "additionalProperties": false,
+		    "properties": {"burst_index": {"type": "string", "pattern":"^[-0-9]+$"},
+				  "optram": {"type": "number", "default": 4000}}
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["in", "out"]
+	},
+
+        "Pre_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["doppler_file"],
+		    "additionalProperties": false,
+		    "properties": {"doppler_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["ML_range", "ML_azimut", "ML_gain"],
+		    "additionalProperties": false,
+		    "properties": {"ML_range": {"type": "number"},
+				  "ML_azimut": {"type": "number"},
+				  "ML_gain": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+	
+        "Ground": {"type": "object", "additionalProperties": false},
+
+        "DIn_SAR": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["GridStep_range", "GridStep_azimut", "Grid_Threshold", "Grid_Gap", 
+				 "Interferogram_gain"],
+		    "additionalProperties": false,
+		    "properties": {"GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"},
+				   "Grid_Threshold": {"type": "number"},
+				   "Grid_Gap": {"type": "number"},
+				   "Interferogram_gain": {"type": "number"},
+				   "ESD_iter" : {"type": ["integer","string"]}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	},
+
+	 "Post_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["Activate_Ortho"],
+		    "additionalProperties": false,
+		    "properties": {"Activate_Ortho": {"type": "string"},
+				   "Spacingxy": {"type": "number", "default": 0.0001},
+				   "Activate_Filtering": {"type": "string"},
+				   "Filtered_Interferogram_mlran": {"type": "number"},
+				   "Filtered_Interferogram_mlazi": {"type": "number"},
+				   "Filtering_parameter_alpha": {"type": "number"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	}
+    }
+}
diff --git a/python_src/share/json_schemas/schema_S1SM.json b/python_src/share/json_schemas/schema_S1SM.json
new file mode 100644
index 0000000000000000000000000000000000000000..8c7a8ac0cf25bdff53a2c009ddf5370addb01f9d
--- /dev/null
+++ b/python_src/share/json_schemas/schema_S1SM.json
@@ -0,0 +1,156 @@
+{    
+    "$schema": "http://json-schema.org/schema#",
+    "title": "JSON SCHEMA for DiapOTB S1 SM chain",
+    "description": "JSON organization for the script diapOTB.py",
+
+    "type": "object",
+
+    "allOf": [{"required": ["Global", "Pre_Processing", "DIn_SAR", 
+			    "Post_Processing"]}],
+
+    "properties":
+    {
+	"Global": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"in": 
+		{
+		    "type": "object", 
+		    "required": ["Master_Image_Path", "Slave_Image_Path", "DEM_Path"],
+		    "additionalProperties": false,
+		    "properties": {"Master_Image_Path": {"type": "string"}, 
+				  "Slave_Image_Path": {"type": "string"},
+				  "DEM_Path": {"type": "string"},
+				  "EOF_Path": {"type": "string"}}
+		}, 
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["output_dir"],
+		    "additionalProperties": false,
+		    "properties": {"output_dir": {"type": "string"}}
+		},
+		"sensor": 
+		{
+		    "type": "object",
+		    "required": ["satellite", "mode"],
+		    "additionalProperties": false,
+		    "properties": {"satellite": {"type": "string"},
+				   "mode": {"type": "string"}}
+		},
+		"parameter": 
+		{
+		    "type": "object",
+		    "additionalProperties": false,
+		    "properties": {"optram": {"type": "number", "default": 4000}}
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["in", "out"]
+	},
+
+        "Pre_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["doppler_file"],
+		    "additionalProperties": false,
+		    "properties": {"doppler_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["ML_range", "ML_azimut", "ML_gain"],
+		    "additionalProperties": false,
+		    "properties": {"ML_range": {"type": "number"},
+				   "ML_azimut": {"type": "number"},
+				   "ML_gain": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+	
+        "Metadata_Correction": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"out": 
+		{
+		    "type": "object",
+		    "required": ["fine_metadata_file"],
+		    "additionalProperties": false,
+		    "properties": {"fine_metadata_file": {"type": "string"}}
+		}, 
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["activate", "GridStep_range", "GridStep_azimut"],
+		    "additionalProperties": false,
+		    "properties": {"activate": {"type": "boolean"},
+				   "GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"}}
+
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["out", "parameter"]
+	},
+
+        "DIn_SAR": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["GridStep_range", "GridStep_azimut", "Grid_Threshold", "Grid_Gap", 
+				 "Interferogram_gain"],
+		    "additionalProperties": false,
+		    "properties": {"GridStep_range": {"type": "number"},
+				   "GridStep_azimut": {"type": "number"},
+				   "Grid_Threshold": {"type": "number"},
+				   "Grid_Gap": {"type": "number"},
+				   "Interferogram_gain": {"type": "number"},
+				   "Interferogram_mlran": {"type": "number"},
+		                   "Interferogram_mlazi": {"type": "number"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	},
+
+	 "Post_Processing": 
+	{
+	    "type": "object",
+	    "properties": 
+	    {
+		"parameter": 
+		{
+		    "type": "object",
+		    "required": ["Activate_Ortho"],
+		    "additionalProperties": false,
+		    "properties": {"Activate_Ortho": {"type": "string"},
+				   "Spacingxy": {"type": "number", "default": 0.0001},
+				   "Activate_Filtering": {"type": "string"},
+				   "Filtered_Interferogram_mlran": {"type": "number"},
+				   "Filtered_Interferogram_mlazi": {"type": "number"},
+				   "Filtering_parameter_alpha": {"type": "number"}
+				  }
+		}
+	    },
+	    "additionalProperties": false,
+	    "required": ["parameter"]
+	}
+    }
+}
diff --git a/python_src/utils/compute_spectra_ImageTiff.py b/python_src/utils/compute_spectra_ImageTiff.py
old mode 100755
new mode 100644