diff --git a/Documentation/Cookbook/Art/QtImages/rescale_documentation.png b/Documentation/Cookbook/Art/QtImages/rescale_documentation.png
new file mode 100644
index 0000000000000000000000000000000000000000..70cea29e59f15f04ac7a8ccbf119aba372409c16
Binary files /dev/null and b/Documentation/Cookbook/Art/QtImages/rescale_documentation.png differ
diff --git a/Documentation/Cookbook/Art/QtImages/rescale_logs.png b/Documentation/Cookbook/Art/QtImages/rescale_logs.png
new file mode 100644
index 0000000000000000000000000000000000000000..ed6abd8f90e348f63154d77c9b593a344df38bd6
Binary files /dev/null and b/Documentation/Cookbook/Art/QtImages/rescale_logs.png differ
diff --git a/Documentation/Cookbook/Art/QtImages/rescale_param.png b/Documentation/Cookbook/Art/QtImages/rescale_param.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3b9cff8ce3de5dc72fc386da55c34dc9040fada
Binary files /dev/null and b/Documentation/Cookbook/Art/QtImages/rescale_param.png differ
diff --git a/Documentation/Cookbook/Art/QtImages/rescale_progress.png b/Documentation/Cookbook/Art/QtImages/rescale_progress.png
new file mode 100644
index 0000000000000000000000000000000000000000..bedf4336133a201ddc550eceda4ef17600c7ccc8
Binary files /dev/null and b/Documentation/Cookbook/Art/QtImages/rescale_progress.png differ
diff --git a/Documentation/Cookbook/Art/S1-VV-despeckled-extract.png b/Documentation/Cookbook/Art/S1-VV-despeckled-extract.png
new file mode 100644
index 0000000000000000000000000000000000000000..6150149896542c1235fa848b52ab508e437bdde2
Binary files /dev/null and b/Documentation/Cookbook/Art/S1-VV-despeckled-extract.png differ
diff --git a/Documentation/Cookbook/Art/S1-VV-extract-int.png b/Documentation/Cookbook/Art/S1-VV-extract-int.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3563a1e1054cf3f45d429e3f1af550eac42e81f
Binary files /dev/null and b/Documentation/Cookbook/Art/S1-VV-extract-int.png differ
diff --git a/Documentation/Cookbook/Art/alphahot.png b/Documentation/Cookbook/Art/alphahot.png
new file mode 100644
index 0000000000000000000000000000000000000000..c38fccf2ab6d1c5130752c70c7aa8d90e35da687
Binary files /dev/null and b/Documentation/Cookbook/Art/alphahot.png differ
diff --git a/Documentation/Cookbook/Art/anisotropyhot.png b/Documentation/Cookbook/Art/anisotropyhot.png
new file mode 100644
index 0000000000000000000000000000000000000000..8e0cd740f29cf43b3e3332ff5f92e74ad1b18d5c
Binary files /dev/null and b/Documentation/Cookbook/Art/anisotropyhot.png differ
diff --git a/Documentation/Cookbook/Art/entropyhot.png b/Documentation/Cookbook/Art/entropyhot.png
new file mode 100644
index 0000000000000000000000000000000000000000..16568232be27014786b1140b67d93c0f8b466519
Binary files /dev/null and b/Documentation/Cookbook/Art/entropyhot.png differ
diff --git a/Documentation/Cookbook/Art/sarpol_conversion_schema.png b/Documentation/Cookbook/Art/sarpol_conversion_schema.png
new file mode 100644
index 0000000000000000000000000000000000000000..fab218062b1988e58c6af658b2f4c268875e9efc
Binary files /dev/null and b/Documentation/Cookbook/Art/sarpol_conversion_schema.png differ
diff --git a/Documentation/Cookbook/Art/test-left-co-2.png b/Documentation/Cookbook/Art/test-left-co-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..419da0dabee666081364efd81f513516c2601f39
Binary files /dev/null and b/Documentation/Cookbook/Art/test-left-co-2.png differ
diff --git a/Documentation/Cookbook/Art/test-left-cross-2.png b/Documentation/Cookbook/Art/test-left-cross-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..590896e5c836ab3d51e729decf73ad0aa644ba7e
Binary files /dev/null and b/Documentation/Cookbook/Art/test-left-cross-2.png differ
diff --git a/Documentation/Cookbook/CMake/RunApplicationsRstGenerator.sh.cmake.in b/Documentation/Cookbook/CMake/RunApplicationsRstGenerator.sh.cmake.in
new file mode 100644
index 0000000000000000000000000000000000000000..59f8be4e926a31bbaffad2ba8e94531eac2240c4
--- /dev/null
+++ b/Documentation/Cookbook/CMake/RunApplicationsRstGenerator.sh.cmake.in
@@ -0,0 +1,8 @@
+#!/bin/bash
+
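+# @OTB_DIR@ and @PYTHON_EXECUTABLE@ are placeholders substituted by CMake's
+# configure_file() when this template is configured from the Cookbook build.
+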
+export PYTHONPATH=@OTB_DIR@/lib:$PYTHONPATH
+export PYTHONPATH=@OTB_DIR@/Modules/Wrappers/SWIG/src:$PYTHONPATH
+
+export OTB_APPLICATION_PATH=@OTB_DIR@/lib/otb/applications
+
+@PYTHON_EXECUTABLE@ @PROJECT_SOURCE_DIR@/Scripts/otbGenerateWrappersRstDoc.py
diff --git a/Documentation/Cookbook/CMakeLists.txt b/Documentation/Cookbook/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..50286412f82309ec834bf386b9b6550f72483d12
--- /dev/null
+++ b/Documentation/Cookbook/CMakeLists.txt
@@ -0,0 +1,10 @@
+cmake_minimum_required(VERSION 2.8)
+
+# This is the Orfeo ToolBox cookbook: a guide for non-developers
+project(CookBook)
+
+# need an OTB build dir
+find_package(OTB REQUIRED)
+find_package(PythonInterp REQUIRED)
+
+add_subdirectory(rst)
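+
+# Typical usage, assuming an existing OTB build tree (paths are illustrative):
+#   cmake -DOTB_DIR=/path/to/OTB-build /path/to/otb/Documentation/Cookbook
+#   make CookBookHTMLFromRST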
diff --git a/Documentation/Cookbook/Scripts/otbGenerateWrappersRstDoc.py b/Documentation/Cookbook/Scripts/otbGenerateWrappersRstDoc.py
new file mode 100755
index 0000000000000000000000000000000000000000..13463fbcab9af1bc957b1d446efbe6fcca3788b1
--- /dev/null
+++ b/Documentation/Cookbook/Scripts/otbGenerateWrappersRstDoc.py
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+import otbApplication
+import os
+import sys
+import glob
+from optparse import OptionParser
+
+##############################################################################
+# Parameters
+linesep = os.linesep
+pixeltypes = {' uchar' : 1, ' int8' : 0, ' uint8' : 1, ' int16' : 2, ' uint16': 3, ' int32' : 4, ' uint32' : 5, ' float' : 6, ' double': 7}
+
+import re
+
+#Special/Exceptional cases
+def RstifyDescription(s):
+    s = s.replace(':\n', ':\n\n')
+    s = s.replace('\n', ' ')
+    s = s.replace('*','\*')
+    if not len(s) == 0 and not s.endswith('.'):
+        s += '.'
+    return s
+
+def ConvertString(s):
+    '''Convert a string for compatibility in txt dump'''
+    s = s.strip()
+    s = s.replace('*','\*')
+    return s
+
+
+def EncloseString(s):
+    if not s.startswith("\"") :
+        s = "\"" + s
+    if not s.endswith("\""):
+        s = s + "\""
+    return s
+
+def ExpandPath(filename,path,exp):
+    if not exp:
+        return filename
+    else:
+        # Avoid chasing our tails
+        (head,tail) = os.path.split(filename)
+        if len(tail) > 0:
+            filename = tail
+        for dir,dirs,files in os.walk(path):
+            for file in files:
+                if file == filename:
+                    return os.path.join(dir,file)
+        return os.path.join(path,filename)
+
+def GetPixelType(value):
+    # look for type
+    foundcode = -1
+    foundname = ""
+    for ptypename, ptypecode in pixeltypes.iteritems():
+        if value.endswith(ptypename):
+            foundcode = ptypecode
+            foundname = ptypename
+            break
+    return foundcode,foundname
+
+def GetParametersDepth(paramlist):
+    depth = 0
+    for param in paramlist:
+        depth = max(param.count("."),depth)
+    return depth
+
+def GenerateChoice(app,param,paramlist, count = 0):
+    output = " Available choices are: " + linesep
+    spaces = ' ' * count
+    for (choicekey,choicename) in zip(app.GetChoiceKeys(param),app.GetChoiceNames(param)):
+        output += linesep + spaces + "- **"+ ConvertString(choicename) + "**"
+        choicedesc = app.GetParameterDescription(param+"."+choicekey)
+        if len(choicedesc) >= 2:
+            output+= " : " + ConvertString(choicedesc)
+        output += linesep + linesep
+        # List option associated to one choice
+        options = []
+        for p in paramlist:
+            if p.startswith(param+"."+choicekey+"."):
+                options.append(p)
+        if len(options) > 0:
+            count += 1
+            spaces = ' ' * count
+            for option in options:
+                output+= linesep + spaces + "- **"+ ConvertString(app.GetParameterName(option))+ "** : " + RstifyDescription(app.GetParameterDescription(option)) + linesep
+            output+= linesep
+    return output
+
+def GenerateParameterType(app,param):
+    if app.GetParameterType(param) == otbApplication.ParameterType_Empty:
+        return "Boolean"
+    if app.GetParameterType(param) == otbApplication.ParameterType_Int \
+       or app.GetParameterType(param) == otbApplication.ParameterType_Radius \
+       or app.GetParameterType(param) == otbApplication.ParameterType_RAM:
+        return "Int"
+    if app.GetParameterType(param) == otbApplication.ParameterType_Float:
+        return "Float"
+    if app.GetParameterType(param) == otbApplication.ParameterType_String:
+        return "String"
+    if app.GetParameterType(param) == otbApplication.ParameterType_StringList:
+        return "String list"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputFilename :
+        return "Input File name"
+    if app.GetParameterType(param) == otbApplication.ParameterType_OutputFilename :
+        return "Output File name"
+    if app.GetParameterType(param) == otbApplication.ParameterType_Directory :
+        return "Directory"
+    if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+        return "Choices"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputImage \
+            or app.GetParameterType(param) == otbApplication.ParameterType_ComplexInputImage:
+        return "Input image"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputVectorData:
+        return "Input vector data"
+    if app.GetParameterType(param) == otbApplication.ParameterType_OutputImage \
+            or app.GetParameterType(param) == otbApplication.ParameterType_ComplexOutputImage :
+        return "Output image"
+    if app.GetParameterType(param) == otbApplication.ParameterType_OutputVectorData:
+        return "Output vector data"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputImageList:
+        return "Input image list"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputVectorDataList:
+        return "Input vector data list"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputFilenameList :
+        return "Input File name list"
+    if app.GetParameterType(param) == otbApplication.ParameterType_ListView:
+        return "List"
+    if app.GetParameterType(param) == otbApplication.ParameterType_Group:
+        return "Group"
+    if app.GetParameterType(param) == otbApplication.ParameterType_InputProcessXML:
+        return "XML input parameters file"
+    if app.GetParameterType(param) == otbApplication.ParameterType_OutputProcessXML:
+        return "XML output parameters file"
+
+def FindLengthOfLargestColumnText(app,paramlist):
+    output= ""
+    colLength = [2] * 3
+    for param in paramlist:
+        if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+            for (choicekey,choicename) in zip(app.GetChoiceKeys(param),app.GetChoiceNames(param)):
+                lenp= len(param + " " + choicekey)
+                if colLength[0] < lenp:
+                    colLength[0] = lenp
+                lenpdescr = len(choicename)
+                if colLength[2] < lenpdescr:
+                    colLength[2] = lenpdescr
+        else:
+            if colLength[0] < len(param):
+                colLength[0] = len(param)
+            lenpdescr = len(app.GetParameterName(param))
+            if colLength[2] < lenpdescr:
+                colLength[2] = lenpdescr
+        lenptype = len(GenerateParameterType(app,param))
+        if colLength[1] < lenptype:
+            colLength[1] = lenptype
+    return colLength
+
+def RstTableHeaderLine(strlist, listlen, delimiter):
+    line = "+"
+    for i in xrange(len(strlist)):
+        line += delimiter * listlen[i] + '+'
+    line += linesep
+    return line
+
+def RstTableHeading(strlist, listlen):
+    heading = RstTableHeaderLine(strlist, listlen, '-')
+    for i in xrange(len(strlist)):
+         spaces = ' ' * ((listlen[i] - len(strlist[i])) )
+         heading += '|' + strlist[i] +  spaces
+    heading += '|' + linesep
+    heading += RstTableHeaderLine(strlist, listlen, '=')
+    return heading
+
+def MakeText(text, size):
+    dsize = (size - len(text))
+    output= '|' + text  + ' ' * (dsize)
+    return output
+
+def GenerateParametersTable(app,paramlist):
+    colLength = FindLengthOfLargestColumnText(app, paramlist)
+    output = linesep + ".. [#] Table: Parameters table for " + ConvertString(app.GetDocName()) + "." + linesep + linesep
+    headerlist = ["Parameter Key", "Parameter Type", "Parameter Description"]
+    for i in xrange(len(headerlist)):
+        colLength[i] = len(headerlist[i]) if colLength[i] < len(headerlist[i]) else colLength[i]
+    output += RstTableHeading(headerlist, colLength)
+    for param in paramlist:
+        output += MakeText(param, colLength[0])
+        output += MakeText(GenerateParameterType(app,param), colLength[1])
+        output += MakeText(app.GetParameterName(param), colLength[2])
+        output += '|' + linesep
+        output += RstTableHeaderLine(headerlist, colLength, '-')
+        if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+            for (choicekey,choicename) in zip(app.GetChoiceKeys(param),app.GetChoiceNames(param)):
+                output += MakeText(param + " " + choicekey, colLength[0])
+                output += MakeText(" *Choice*" ,colLength[1])
+                output += MakeText(choicename, colLength[2])
+                output += '|' + linesep
+                output += RstTableHeaderLine(headerlist, colLength, '-')
+    return output
+
+def unique(seq):
+    # order preserving
+    checked = []
+    for e in seq:
+        if e not in checked:
+            checked.append(e)
+    return checked
+
+def ApplicationParametersToRst(app,paramlist,deep = False,current=""):
+    output = ""
+    # First run
+    if len(current)==0:
+        output += "This section describes in details the parameters available for this application. Table [#]_ presents a summary of these parameters and the parameters keys to be used in command-line and programming languages. Application key is *" + app.GetName() + "* ."  + linesep
+        output += GenerateParametersTable(app,paramlist)
+        firstlevelparams = []
+        for param in paramlist:
+            paramsplit = param.partition(".")
+            firstlevelparams.append(paramsplit[0])
+        firstlevelparams = unique(firstlevelparams)
+
+        if deep:
+            for param in firstlevelparams:
+                output += linesep
+                output += "**" + ConvertString(app.GetParameterName(param)) + "**" + linesep
+                output += RstifyDescription(app.GetParameterDescription(param))
+                if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+                    output += GenerateChoice(app,param,paramlist)
+                    output += linesep
+                else:
+                    output += linesep
+                    output += ApplicationParametersToRst(app,paramlist,deep,param)
+        else:
+            output+= linesep
+            for param in firstlevelparams:
+                output+= "- **"+ ConvertString(app.GetParameterName(param))+ ":** " + RstifyDescription(app.GetParameterDescription(param))
+                if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+                    output += GenerateChoice(app,param,paramlist)
+                output += linesep + linesep
+            output+=  linesep
+    else:
+        currentlevelparams = []
+        for param in paramlist:
+            if param.startswith(current+".") and param.count(".") == current.count(".")+1:
+                currentlevelparams.append(param)
+        if len(currentlevelparams) > 0:
+            output+= linesep
+            for param in currentlevelparams:
+                output+= "- **"+ ConvertString(app.GetParameterName(param))+ ":** " + RstifyDescription(app.GetParameterDescription(param)) + linesep
+                output+= ApplicationParametersToRst(app,paramlist,deep,param) + linesep
+                if app.GetParameterType(param) ==  otbApplication.ParameterType_Choice:
+                    output += GenerateChoice(app,param,paramlist, 1)
+            output+= linesep
+
+    return output
+
+def GetApplicationExampleCommandLine(app,idx):
+
+    output = "%s%s%s\t%s" % ("::", linesep , linesep, "otbcli_")
+    output+= ConvertString(app.GetName())
+    for i in range(0, app.GetExampleNumberOfParameters(idx)):
+        output+=" -" + app.GetExampleParameterKey(idx,i)+ " " + app.GetExampleParameterValue(idx,i)
+    output += linesep + linesep
+    return output
+
+def GetApplicationExamplePythonSnippet(app,idx,expand = False, inputpath="",outputpath=""):
+    appname = app.GetName()
+    printable = []
+    output = linesep + "::" + linesep + linesep
+    output+= "\t#!/usr/bin/python" + linesep
+
+    output+= linesep
+    output+= "\t# Import the otb applications package" + linesep
+    output+= "\timport otbApplication" + linesep + linesep
+    output+= "\t# The following line creates an instance of the " + ConvertString(app.GetName()) + " application " + linesep
+    output+= "\t" + ConvertString(app.GetName()) + " = otbApplication.Registry.CreateApplication(\"" + ConvertString(app.GetName()) + "\")" + linesep + linesep
+    output+= "\t# The following lines set all the application parameters:" + linesep
+    for i in range(0, app.GetExampleNumberOfParameters(idx)):
+        param = app.GetExampleParameterKey(idx,i)
+        value = app.GetExampleParameterValue(idx,i)
+        paramtype = app.GetParameterType(param)
+        paramrole = app.GetParameterRole(param)
+        if paramtype == otbApplication.ParameterType_ListView:
+            break
+        if paramtype == otbApplication.ParameterType_Group:
+            break
+        if paramtype ==  otbApplication.ParameterType_Choice:
+            #app.SetParameterString(param,value)
+            output+= "\t" + appname + ".SetParameterString(" + EncloseString(param) + "," + EncloseString(value) + ")" + linesep
+        if paramtype == otbApplication.ParameterType_Empty:
+            app.SetParameterString(param,"1")
+            output+= "\t" + appname + ".SetParameterString("+EncloseString(param)+",\"1\")" + linesep
+        if paramtype == otbApplication.ParameterType_Int \
+                or paramtype == otbApplication.ParameterType_Radius \
+                or paramtype == otbApplication.ParameterType_RAM:
+            # app.SetParameterString(param,value)
+            output += "\t" + appname + ".SetParameterInt("+EncloseString(param)+", "+value+")" + linesep
+        if paramtype == otbApplication.ParameterType_Float:
+            # app.SetParameterString(param,value)
+            output += "\t" + appname + ".SetParameterFloat("+EncloseString(param)+", "+value + ")" + linesep
+        if paramtype == otbApplication.ParameterType_String:
+            # app.SetParameterString(param,value)
+            output+= "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(value)+")" + linesep
+        if paramtype == otbApplication.ParameterType_StringList:
+            values = value.split(" ")
+            # app.SetParameterStringList(param,values)
+            output += "\t" + appname + ".SetParameterStringList("+EncloseString(param)+", "+str(values)+")" + linesep
+        if paramtype == otbApplication.ParameterType_InputFilename \
+            or paramtype == otbApplication.ParameterType_OutputFilename \
+            or paramtype == otbApplication.ParameterType_Directory:
+            if paramrole == 0:
+                # app.SetParameterString(param,EncloseString(ExpandPath(value,inputpath,expand)))
+                output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value,inputpath,expand)) + ")" + linesep
+                printable.append(["in","file",ExpandPath(value,inputpath,expand)])
+            elif paramrole == 1:
+                # app.SetParameterString(param,EncloseString(ExpandPath(value,outputpath,expand)))
+                output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value,outputpath,expand))+")" + linesep
+                printable.append(["out","file",ExpandPath(value,inputpath,expand)])
+        if paramtype == otbApplication.ParameterType_InputImage :
+            # app.SetParameterString(param,EncloseString(ExpandPath(value,inputpath,expand)))
+            output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value,inputpath,expand))+")"+linesep
+            printable.append(["in","img",ExpandPath(value,inputpath,expand)])
+        if paramtype == otbApplication.ParameterType_ComplexInputImage:
+            # app.SetParameterString(param,EncloseString(ExpandPath(value,inputpath,expand)))
+            output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value,inputpath,expand))+")" + linesep
+            printable.append(["in","cimg",ExpandPath(value,inputpath,expand)])
+        if paramtype == otbApplication.ParameterType_InputVectorData:
+            # app.SetParameterString(param,EncloseString(ExpandPath(value,inputpath,expand)))
+            output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value,inputpath,expand))+")" + linesep
+            printable.append(["in","vdata",ExpandPath(value,inputpath,expand)])
+        if paramtype == otbApplication.ParameterType_OutputImage :
+            foundcode,foundname = GetPixelType(value)
+            if foundcode != -1:
+                # app.SetParameterString(param,EncloseString(ExpandPath(value[:-len(foundname),outputpath,expand))))
+                output += "\t" + appname + ".SetParameterString("+EncloseString(param)+", "+EncloseString(ExpandPath(value[:-len(foundname)],outputpath,expand))+")" + linesep
+                # app.SetParameterOutputImagePixelType(param,foundcode)
+                if foundcode == 1:
+                    printable.append(["out","ucimg",ExpandPath(value[:-len(foundname)],outputpath,expand)])
+                else:
+                    printable.append(["out","img",ExpandPath(value[:-len(foundname)],outputpath,expand)])
+                output += "\t" + appname + ".SetParameterOutputImagePixelType("+EncloseString(param)+", "+str(foundcode)+")" + linesep
+            else:
+                # app.SetParameterString(param,EncloseString(ExpandPath(value,outputpath,expand)))
+                output += "\t" + appname +".SetParameterString("+EncloseString(param)+", "+ EncloseString(ExpandPath(value,outputpath,expand)) + ")" + linesep
+                printable.append(["out","img",ExpandPath(value,outputpath,expand)])
+        if paramtype == otbApplication.ParameterType_ComplexOutputImage :
+            # TODO: handle complex type properly
+            # app.SetParameterString(param,EncloseString(ExpandPath(value,outputpath,expand)))
+            output += "\t" + appname +".SetParameterString("+EncloseString(param)+", "+ EncloseString(ExpandPath(value,outputpath,expand)) + ")" + linesep
+            printable.append(["out","cimg",ExpandPath(value,outputpath,expand)])
+        if paramtype == otbApplication.ParameterType_OutputVectorData:
+            # app.SetParameterString(param,EncloseString(ExpandPath(value,outputpath,expand)))
+            output += "\t" + appname +".SetParameterString("+EncloseString(param)+", "+ EncloseString(ExpandPath(value,outputpath,expand)) + ")" + linesep
+            printable.append(["out","vdata",ExpandPath(value,outputpath,expand)])
+        if paramtype == otbApplication.ParameterType_InputImageList:
+            values = value.split(" ")
+            values = [ExpandPath(val,inputpath,expand) for val in values]
+            # app.SetParameterStringList(param,values)
+            output += "\t" + appname + ".SetParameterStringList("+EncloseString(param) + ", " + str(values) + ")" + linesep
+        if paramtype == otbApplication.ParameterType_InputVectorDataList:
+            values = value.split(" ")
+            values = [ExpandPath(val,inputpath,expand) for val in values]
+            #app.SetParameterStringList(param,values)
+            output += "\t" + appname + ".SetParameterStringList("+EncloseString(param)+ ", " + str(values) + ")" + linesep
+        output+=linesep
+    output += "\t# The following line execute the application" + linesep
+    output+= "\t" + appname + ".ExecuteAndWriteOutput()"+ linesep
+    return output,printable
+
+def GetApplicationExamplePython(app,idx):
+    output, printable = GetApplicationExamplePythonSnippet(app,idx)
+    output+= linesep
+    return output
+
+def RstHeading(text, delimiter):
+    heading = text + linesep
+    heading += delimiter * len(text)  + linesep
+    heading += linesep
+    return heading
+
+def ApplicationToRst(appname):
+    output = ""
+    app = otbApplication.Registry.CreateApplication(appname)
+    # TODO: remove this when bug 440 is fixed
+    app.Init()
+    output += RstHeading(app.GetDocName(), '^')
+    output += app.GetDescription() + linesep * 2
+    output += RstHeading("Detailed description", '-')
+    output += app.GetDocLongDescription() + linesep * 2
+    limitations = app.GetDocLimitations()
+    output += RstHeading("Parameters", '-')
+    depth = GetParametersDepth(app.GetParametersKeys())
+    deep = depth > 0
+    output += ApplicationParametersToRst(app,app.GetParametersKeys(),deep) + linesep
+    if app.GetNumberOfExamples() > 1:
+        output += RstHeading("Examples", '-') + linesep
+        #output += appdetailslevel + "{Examples}" + "\\label{appexamples:" + appname + "}" + linesep
+        for i in range(0,app.GetNumberOfExamples()):
+            output += ":Example "+  str(i+1) + ':' + linesep + linesep
+#            output += RstHeading("Example "+  str(i+1) , '-')
+            output += app.GetExampleComment(i)
+            output+= "To run this example in command-line, use the following: " + linesep
+            output += linesep + GetApplicationExampleCommandLine(app,i)
+            output+= "To run this example from Python, use the following code snippet: " + linesep
+            output += GetApplicationExamplePython(app,i)
+    elif app.GetNumberOfExamples() == 1:
+        output += RstHeading("Example", '-')
+        if( len(app.GetExampleComment(0)) > 1):
+            output += app.GetExampleComment(0)
+        output+= "To run this example in command-line, use the following: " + linesep
+        output += GetApplicationExampleCommandLine(app,0)
+        output+= "To run this example from Python, use the following code snippet: " + linesep
+        output += GetApplicationExamplePython(app,0)
+
+    if len(limitations)>=2:
+        output += RstHeading("Limitations", '~')
+#        output += ":Limitations:" + linesep + linesep
+        output += ConvertString(app.GetDocLimitations()) + linesep + linesep
+
+    output += RstHeading("Authors", '~')
+#    output += ":Authors:" + linesep + linesep
+    output += "This application has been written by " + ConvertString(app.GetDocAuthors()) + "." + linesep + linesep
+    seealso = app.GetDocSeeAlso()
+    if len(seealso) >=2:
+        output += RstHeading("See Also", '~')
+#        output += ":See Also:" + linesep + linesep
+        output += "These additional ressources can be useful for further information: " + linesep
+        # hlink="<http://www.readthedocs.org/" + ConvertString(app.GetDocSeeAlso()) + ".html>`_ "
+        # output += linesep + "`" + ConvertString(app.GetDocSeeAlso()) + " " + hlink + linesep + linesep
+        output += linesep + ConvertString(app.GetDocSeeAlso()) + linesep + linesep
+
+    return output
+
+def GetApplicationTags(appname):
+     app = otbApplication.Registry.CreateApplication(appname)
+     return app.GetDocTags()
+
+import shutil
+
+def RstPageHeading(text):
+    output = RstHeading(text, "=") + linesep
+    output += ".. toctree::" + linesep
+    output += "\t:maxdepth: 2" + linesep
+    output += linesep + linesep
+    return output
+
+def GenerateRstForApplications():
+    out = ""
+    blackList = ["TestApplication", "Example"]
+    appIndexFile = open('Applications.rst', 'w')
+    appNames = [app for app in otbApplication.Registry.GetAvailableApplications() if app not in blackList]
+    if not appNames:
+        print 'No OTB applications available. Please check OTB_APPLICATION_PATH (or ITK_AUTOLOAD_PATH) env variable'
+        sys.exit(1)
+    sectionTags = ["Image Manipulation","Vector Data Manipulation", "Calibration","Geometry", "Image Filtering","Feature Extraction","Stereo","Learning","Segmentation"]
+    appIndexFile.write(RstPageHeading("Applications"))
+
+    for tag in sectionTags:
+        directory= "Applications/" + tag
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+        tag_ = tag.replace(' ', '_')
+        appIndexFile.write('\tApplications/' + tag_ + '.rst' + linesep)
+        #chapterIndexFile = open('Applications/' + tag + '.rst', 'w')
+        #chapterIndexFile.write(RstPageHeading(tag))
+        #print linesep + RstHeading(tag, '=')
+        appsRemoved = []
+        for appName in appNames:
+            apptags = GetApplicationTags(appName)
+
+            if apptags.count(tag) > 0:
+                print "Generating " + appName + ".rst"
+                #chapterIndexFile.write("\t" + tag + '/' + appName + linesep)
+                appFile = open('Applications/app_'  + appName + '.rst', 'w')
+                out = ApplicationToRst(appName)
+                appFile.write(out)
+                appFile.close()
+                appsRemoved.append(appName)
+        for appName in appsRemoved:
+            appNames.remove(appName)
+        #chapterIndexFile.close()
+
+    misctag = "Miscellaneous" #should this be Utilities
+    if not os.path.exists("Applications/" + misctag):
+        os.makedirs("Applications/" + misctag)
+
+    appIndexFile.write('\tApplications/' + misctag + linesep)
+    appIndexFile.close()
+    #miscChapterIndexFile = open("Applications/" + misctag + '.rst', 'w')
+    #miscChapterIndexFile.write(RstPageHeading(misctag))
+    for appName in appNames:
+        print "Generating " + appName + ".rst"
+        appFile = open("Applications/app_" +  appName + ".rst", 'w')
+        out = ApplicationToRst(appName)
+        appFile.write(out)
+        appFile.close()
+        #miscChapterIndexFile.write('\t' + misctag + '/' + appName + linesep)
+        out = ""
+    return out
+
+
+# Start parsing options
+parser = OptionParser(usage="Export application(s) to tex or pdf file.")
+parser.add_option("-a",dest="appname",help="Generate rst only for this application (eg: OrthoRectification)")
+parser.add_option("-m",dest="module",help="Generate rst only for this module (eg: Image Manipulation)")
+(options, args) = parser.parse_args()
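+# Example invocations, assuming the environment prepared by
+# RunApplicationsRstGenerator.sh (PYTHONPATH and OTB_APPLICATION_PATH pointing
+# to an OTB build tree):
+#   python otbGenerateWrappersRstDoc.py                        # all applications
+#   python otbGenerateWrappersRstDoc.py -a OrthoRectification  # a single application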
+
+if options.appname is not None:
+    out = ApplicationToRst(options.appname)
+    #print out
+else:
+    GenerateRstForApplications()
diff --git a/Documentation/Cookbook/rst/Applications.rst b/Documentation/Cookbook/rst/Applications.rst
new file mode 100644
index 0000000000000000000000000000000000000000..046e3cf2e2f5b14c2e4060053a8044f10e93b97a
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications.rst
@@ -0,0 +1,17 @@
+Applications
+============
+
+
+.. toctree::
+	:maxdepth: 2
+
+	Applications/Image_Manipulation.rst
+	Applications/Vector_Data_Manipulation.rst
+	Applications/Calibration.rst
+	Applications/Geometry.rst
+	Applications/Image_Filtering.rst
+	Applications/Feature_Extraction.rst
+	Applications/Stereo.rst
+	Applications/Learning.rst
+	Applications/Segmentation.rst
+	Applications/Miscellaneous.rst
diff --git a/Documentation/Cookbook/rst/Applications/Calibration.rst b/Documentation/Cookbook/rst/Applications/Calibration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..11717b678cc9f90012ea9151e094374b22edd6bc
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Calibration.rst
@@ -0,0 +1,9 @@
+Calibration
+===========
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_OpticalCalibration
+	app_SarRadiometricCalibration
diff --git a/Documentation/Cookbook/rst/Applications/Feature_Extraction.rst b/Documentation/Cookbook/rst/Applications/Feature_Extraction.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7b95075931dadadd8cc9a817581bc4426a289e2a
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Feature_Extraction.rst
@@ -0,0 +1,21 @@
+Feature Extraction
+==================
+
+
+.. toctree::
+	:maxdepth: 1
+
+
+	app_BinaryMorphologicalOperation
+	app_ComputePolylineFeatureFromImage
+	app_DSFuzzyModelEstimation
+	app_EdgeExtraction
+	app_GrayScaleMorphologicalOperation
+	app_HaralickTextureExtraction
+	app_HomologousPointsExtraction
+	app_LineSegmentDetection
+	app_LocalStatisticExtraction
+	app_MultivariateAlterationDetector
+	app_RadiometricIndices
+	app_SFSTextureExtraction
+	app_VectorDataDSValidation
diff --git a/Documentation/Cookbook/rst/Applications/Geometry.rst b/Documentation/Cookbook/rst/Applications/Geometry.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1d23508af71e9e431b480a392a778cb6cc7ccda0
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Geometry.rst
@@ -0,0 +1,19 @@
+Geometry
+========
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_BundleToPerfectSensor
+	app_ConvertCartoToGeoPoint
+	app_ConvertSensorToGeoPoint
+	app_GeneratePlyFile
+	app_GenerateRPCSensorModel
+	app_GridBasedImageResampling
+	app_ImageEnvelope
+	app_OrthoRectification
+	app_Pansharpening
+	app_RefineSensorModel
+	app_RigidTransformResample
+	app_Superimpose
diff --git a/Documentation/Cookbook/rst/Applications/Image_Filtering.rst b/Documentation/Cookbook/rst/Applications/Image_Filtering.rst
new file mode 100644
index 0000000000000000000000000000000000000000..39300f14b757a175f25644f86946373f9eec8753
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Image_Filtering.rst
@@ -0,0 +1,12 @@
+Image Filtering
+===============
+
+
+.. toctree::
+	:maxdepth: 1
+
+
+	app_Despeckle
+	app_DimensionalityReduction
+	app_MeanShiftSmoothing
+	app_Smoothing
diff --git a/Documentation/Cookbook/rst/Applications/Image_Manipulation.rst b/Documentation/Cookbook/rst/Applications/Image_Manipulation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..891a53cbda5d91e6102d43a9c238d497394ae7e1
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Image_Manipulation.rst
@@ -0,0 +1,21 @@
+Image Manipulation
+==================
+
+
+.. toctree::
+	:maxdepth: 1
+
+
+	app_ColorMapping
+	app_ConcatenateImages
+	app_Convert
+	app_DownloadSRTMTiles
+	app_ExtractROI
+	app_MultiResolutionPyramid
+	app_Quicklook
+	app_ReadImageInfo
+	app_Rescale
+	app_SplitImage
+	app_TileFusion
+	app_ManageNoData
+	app_DEMConvert
diff --git a/Documentation/Cookbook/rst/Applications/Learning.rst b/Documentation/Cookbook/rst/Applications/Learning.rst
new file mode 100644
index 0000000000000000000000000000000000000000..15626922695842754dee2c051f83e4f584b2533d
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Learning.rst
@@ -0,0 +1,17 @@
+Learning
+========
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_ClassificationMapRegularization
+	app_ComputeConfusionMatrix
+	app_ComputeImagesStatistics
+	app_FusionOfClassifications
+	app_ImageClassifier
+	app_KMeansClassification
+	app_SOMClassification
+	app_TrainImagesClassifier
+	app_PredictRegression
+	app_TrainRegression
diff --git a/Documentation/Cookbook/rst/Applications/Miscellaneous.rst b/Documentation/Cookbook/rst/Applications/Miscellaneous.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67289bd885f30b4b5eeb247360a060d85c757125
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Miscellaneous.rst
@@ -0,0 +1,16 @@
+Miscellaneous
+=============
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_BandMath
+	app_BandMathX
+	app_CompareImages
+	app_HyperspectralUnmixing
+	app_KmzExport
+	app_OSMDownloader
+	app_ObtainUTMZoneFromGeoPoint
+	app_PixelValue
+	app_VertexComponentAnalysis
diff --git a/Documentation/Cookbook/rst/Applications/Segmentation.rst b/Documentation/Cookbook/rst/Applications/Segmentation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f9530fa784c50504037aa1ad64848b5d59ce80d6
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Segmentation.rst
@@ -0,0 +1,16 @@
+Segmentation
+============
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_ComputeOGRLayersFeaturesStatistics
+	app_ConnectedComponentSegmentation
+	app_HooverCompareSegmentation
+	app_LSMSSegmentation
+	app_LSMSSmallRegionsMerging
+	app_LSMSVectorization
+	app_OGRLayerClassifier
+	app_Segmentation
+	app_TrainOGRLayersClassifier
diff --git a/Documentation/Cookbook/rst/Applications/Stereo.rst b/Documentation/Cookbook/rst/Applications/Stereo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..feadf0508abcd751e1eb771cde429042d25aa41f
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Stereo.rst
@@ -0,0 +1,12 @@
+Stereo
+======
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_BlockMatching
+	app_DisparityMapToElevationMap
+	app_FineRegistration
+	app_StereoFramework
+	app_StereoRectificationGridGenerator
diff --git a/Documentation/Cookbook/rst/Applications/Vector_Data_Manipulation.rst b/Documentation/Cookbook/rst/Applications/Vector_Data_Manipulation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..09f55741573142c04285c50fa5a454b3f5c48854
--- /dev/null
+++ b/Documentation/Cookbook/rst/Applications/Vector_Data_Manipulation.rst
@@ -0,0 +1,13 @@
+Vector Data Manipulation
+========================
+
+
+.. toctree::
+	:maxdepth: 1
+
+	app_ConcatenateVectorData
+	app_Rasterization
+	app_VectorDataExtractROI
+	app_VectorDataReprojection
+	app_VectorDataSetField
+	app_VectorDataTransform
diff --git a/Documentation/Cookbook/rst/CMakeLists.txt b/Documentation/Cookbook/rst/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6efd3884acea01e7261d923d070a6c44b8d1ac99
--- /dev/null
+++ b/Documentation/Cookbook/rst/CMakeLists.txt
@@ -0,0 +1,75 @@
+set(RST_GENERATED_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR}/_sources)
+set(OTB_FULL_VERSION 5.2.0)
+set(OTB_COPYRIGHT_TEXT "2014, OTB Team")
+set(RST_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build")
+set(RST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR})
+
+#make some directories
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/Applications)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/recipes)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/Art)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/Art/MonteverdiImages)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/Art/QtImages)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/_static)
+execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${RST_GENERATED_SOURCE_DIR}/_templates)
+# Auto-generate reference documentation in build directory
+
+configure_file(${CookBook_SOURCE_DIR}/CMake/RunApplicationsRstGenerator.sh.cmake.in
+  ${RST_GENERATED_SOURCE_DIR}/RunApplicationsRstGenerator.sh
+  @ONLY)
+
+file(GLOB mvd_images ${CookBook_SOURCE_DIR}/Art/MonteverdiImages/*.*)
+foreach(mvd_image ${mvd_images})
+#  message(${mvd_image})
+  get_filename_component(out_file ${mvd_image} NAME)
+  configure_file(${mvd_image} ${RST_GENERATED_SOURCE_DIR}/Art/MonteverdiImages/${out_file} COPYONLY)
+endforeach()
+
+file(GLOB qt_images ${CookBook_SOURCE_DIR}/Art/QtImages/*.png)
+foreach(qt_image ${qt_images})
+  get_filename_component(out_file ${qt_image} NAME)
+  configure_file(${qt_image} ${RST_GENERATED_SOURCE_DIR}/Art/QtImages/${out_file} COPYONLY)
+endforeach()
+
+file(GLOB rst_sources1 ${RST_SOURCES}/*.rst)
+foreach(rst_file ${rst_sources1})
+  get_filename_component(out_file ${rst_file} NAME)
+  configure_file(${rst_file} ${RST_GENERATED_SOURCE_DIR}/${out_file} COPYONLY)
+endforeach()
+
+file(GLOB rst_sources2 ${RST_SOURCES}/recipes/*.rst)
+foreach(rst_file ${rst_sources2})
+  get_filename_component(out_file ${rst_file} NAME)
+  configure_file(${rst_file} ${RST_GENERATED_SOURCE_DIR}/recipes/${out_file} COPYONLY)
+endforeach()
+
+file(GLOB rst_sources3 ${RST_SOURCES}/Applications/*.rst)
+foreach(rst_file ${rst_sources3})
+  get_filename_component(out_file ${rst_file} NAME)
+  configure_file(${rst_file} ${RST_GENERATED_SOURCE_DIR}/Applications/${out_file} COPYONLY)
+endforeach()
+
+configure_file(${RST_SOURCES}/conf.py.in ${RST_GENERATED_SOURCE_DIR}/conf.py @ONLY)
+configure_file(${RST_SOURCES}/Makefile.in ${RST_GENERATED_SOURCE_DIR}/Makefile @ONLY)
+
+add_custom_target(generate_otbapps_rst
+  ALL
+  COMMAND bash ${RST_GENERATED_SOURCE_DIR}/RunApplicationsRstGenerator.sh
+  WORKING_DIRECTORY ${RST_GENERATED_SOURCE_DIR}
+  COMMENT "Auto-generating Application Reference Documentation in RST"
+  )
+
+add_custom_target(CookBookHTMLFromRST
+  ALL
+  COMMAND ${CMAKE_MAKE_PROGRAM} html
+  WORKING_DIRECTORY ${RST_GENERATED_SOURCE_DIR}
+  DEPENDS generate_otbapps_rst
+  COMMENT "Building RST documentation in html")
+
+# ADD_CUSTOM_TARGET(CookBookHTMLFromPDF
+#   ALL
+#   COMMAND ${CMAKE_MAKE_PROGRAM} pdf
+#   WORKING_DIRECTORY ${RST_SOURCES}/RST
+#   COMMENT "Building RST documentation in html")
+
+# ADD_DEPENDENCIES(CookBookHTMLFromPDF CookBookHTMLFromRST)
\ No newline at end of file
diff --git a/Documentation/Cookbook/rst/Makefile.in b/Documentation/Cookbook/rst/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..83ba8f3b2f8580bb6ff65d092a091e54f612cd3a
--- /dev/null
+++ b/Documentation/Cookbook/rst/Makefile.in
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = @RST_BUILD_DIR@
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/OTB.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/OTB.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/OTB"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/OTB"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/Documentation/Cookbook/rst/Monteverdi.rst b/Documentation/Cookbook/rst/Monteverdi.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0b022ad8128bdd66f3da42c0f86c82bdf24c47d7
--- /dev/null
+++ b/Documentation/Cookbook/rst/Monteverdi.rst
@@ -0,0 +1,567 @@
+A brief tour of Monteverdi
+==========================
+
+Introduction
+------------
+
+The OTB package makes available a set of simple software tools, designed to
+demonstrate what can be done with the Orfeo ToolBox. Many users started using
+these applications for real processing tasks, so we tried to make them more
+generic, more robust and easier to use. OTB users have been asking for an
+integrated application for a while, since using several applications for
+a complete processing chain (ortho-rectification, segmentation,
+classification, etc.) can be a burden. The OTB team received a request
+from CNES’ Strategy and Programs Office in order to provide an
+integrated application for capacity building activities (teaching,
+simple image manipulation, etc.). The specifications included ease of
+integration of new processing modules.
+
+**Warning**: since version 3.0.0, there is only one Monteverdi, which replaces
+monteverdi2 and the original monteverdi 1 (version lower than 1.24.0).
+
+Installation
+------------
+
+The application is called Monteverdi, since this is the name of the composer of
+the Orfeo opera. The application allows you to interactively build remote
+sensing processes based on the Orfeo ToolBox. It is also a tribute to the
+great (and once open source) Khoros/Cantata software.
+
+Installation of Monteverdi is very simple. The standalone packages described for
+the installation of OTB (see [sec:appinstall]) also contain Monteverdi and
+Mapla. Get the latest information on binary packages on the OTB website, in the
+Download section.
+
+In this section we will focus on alternative installation methods that are
+specific to Monteverdi. We will describe how to install it on:
+
+-  for the Windows platform (Seven and later)
+
+-  for 64bit Linux distributions
+
+-  for MacOS X 10.10
+
+If you want to build Monteverdi from source, or if we don’t provide packages for
+your system, some information is available in the OTB Software Guide, in the
+section **Building from Source**. Note that the git repository of Monteverdi is
+located here: https://git.orfeo-toolbox.org/monteverdi2.git. In order to get
+the source of Monteverdi, you will have to check out the right version. For
+instance, use:
+
+::
+
+    git checkout 3.0.0-rc1
+
+to get the 3.0.0 Release Candidate version.
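+
+If you have not cloned the repository yet, you can do so beforehand (the local
+directory name is only an example):
+
+::
+
+    git clone https://git.orfeo-toolbox.org/monteverdi2.git monteverdi
+    cd monteverdi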
+
+Windows
+~~~~~~~
+
+We provide Monteverdi for Windows Seven and later through standalone packages.
+They are cross-compiled with MinGW, for 32bit and 64bit platforms. They contain
+Monteverdi with all OTB Applications. These packages are the same as the ones
+used to install OTB. Check the download page: `OTB Download
+page <https://www.orfeo-toolbox.org/download>`__
+
+There is a 32bit and a 64bit version. They contain the same directory
+structure:
+
+-  ``monteverdi.bat`` : A launcher script for Monteverdi
+
+-  ``mapla.bat`` : A launcher script for Mapla
+
+-  ``otbenv.bat`` : A script to initialize the environment for OTB
+   executables
+
+-  ``bin`` : A folder containing the executables and the DLLs.
+
+-  ``lib`` : A folder containing application DLLs.
+
+To launch Monteverdi, simply use the launcher script in the base directory.
+
+Linux 64bit
+~~~~~~~~~~~
+
+We provide Monteverdi for Linux 64bit OS through standalone packages. They
+contain Monteverdi with all OTB Applications. These packages are the same as
+the ones used to install OTB. Check the download page: `OTB Download
+page <https://www.orfeo-toolbox.org/download>`__
+
+This package is a self-extracting archive. You may uncompress it with a
+double-click on the file, or from the command line:
+
+::
+
+    > sh  OTB-5.2.1-Linux64.run
+
+Please note that the resulting installation is not meant to be moved;
+you should uncompress the archive in its final location. Once the
+archive is extracted, the directory structure consists of:
+
+-  ``monteverdi.sh`` : A launcher script for Monteverdi
+
+-  ``mapla.sh`` : A launcher script for Mapla
+
+-  ``otbenv.profile`` : A script to initialize the environment for OTB
+   executables
+
+-  ``bin`` : A folder containing application launchers (otbcli.sh,
+   otbgui.sh), Monteverdi and Mapla.
+
+-  ``lib`` : A folder containing all shared libraries and OTB
+   applications.
+
+-  ``share`` : A folder containing common resources and copyright
+   mentions.
+
+In order to run the command line launchers, this package doesn’t require
+any special library that is not present in most modern Linux
+distributions. The graphical executables (otbgui launchers, Monteverdi
+and Mapla) use the X11 libraries, which are widely available in most
+distributions:
+
+::
+
+    libx11-6 libxext6 libxau6 libxxf86vm1 libxdmcp6 libdrm2
+
+Monteverdi also requires the standard graphics libraries **libgl1** and
+**libglu1**. Make sure you have at least one version of them installed
+in your system.
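+
+For example, on Debian/Ubuntu based systems, these are typically provided by
+the following packages (package names may differ on other distributions):
+
+::
+
+    sudo apt-get install libgl1-mesa-glx libglu1-mesa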
+
+To launch Monteverdi, simply use the launcher script in the base directory.
+
+MacOS X
+~~~~~~~
+
+We provide Monteverdi for MacOS X through a standalone package. This package is
+a self-extracting archive, quite similar to the Linux one. You may
+uncompress it with the command line:
+
+::
+
+    > sh  OTB-5.4.0-Darwin64.run
+
+Please note that the resulting installation is not meant to be moved;
+you should uncompress the archive in its final location.
+
+Once the archive is extracted, the directory structure is made of :
+
+-  ``monteverdi.sh`` : A launcher script for Monteverdi
+
+-  ``mapla.sh`` : A launcher script for Mapla
+
+-  ``otbenv.profile`` : A script to initialize the environment for OTB
+   executables
+
+-  ``bin`` : A folder containing application launchers (otbcli.sh,
+   otbgui.sh), Monteverdi and Mapla.
+
+-  ``lib`` : A folder containing all shared libraries and OTB
+   applications.
+
+-  ``share`` : A folder containing common resources and copyright
+   mentions.
+
+To launch Monteverdi, simply use the launcher script in the base directory.
+
+Other packages
+~~~~~~~~~~~~~~
+
+OSGeo4W
+^^^^^^^
+
+For Windows Seven/8.1 users, there is a classical standalone installation
+program for Monteverdi, available from the OTB download page after each release.
+
+It is also possible to get the Monteverdi package through OSGeo4W for Windows
+XP/Seven users. The package for Monteverdi is available directly in the OSGeo4W
+installer when you select the **otb-monteverdi** package. Follow the
+instructions in the OSGeo4W installer and select **otb-monteverdi**. The
+installer will proceed with the installation of the package and all its
+dependencies. Monteverdi will be directly installed in the OSGeo4W repository,
+and a shortcut will be added to your desktop and to the start menu (in the
+OSGeo4W folder). You can now use Monteverdi directly from your desktop, from
+the start menu or from an OSGeo4W shell with the command ``monteverdi``.
+Currently, you should use the 32bit OSGeo4W installer, but we will soon
+distribute a package for the 64bit installer.
+
+Ubuntu 12.04 and higher
+^^^^^^^^^^^^^^^^^^^^^^^
+
+For Ubuntu 12.04 and higher, the Monteverdi package may be available as a Debian
+package through APT repositories. You can add the repository by using these command lines:
+
+::
+
+    sudo apt-get install software-properties-common
+    sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable
+
+Now run:
+
+::
+
+    sudo apt-get install monteverdi
+
+If you are using *Synaptic*, you can add the repository, update and
+install the package through the graphical interface.
+
+**apt-add-repository** will try to retrieve the GPG keys of the
+repositories to certify the origin of the packages. If you are behind an
+HTTP proxy, this step won’t work and apt-add-repository will stall and
+eventually quit. You can temporarily ignore this error and proceed with
+the update step. Following this, aptitude update will issue a warning
+about a signature problem. This warning won’t prevent you from
+installing the packages.
+
+MacOS X DMG
+^^^^^^^^^^^
+
+A standard DMG package of Monteverdi is available for MacOS X 10.10. Please go
+to the OTB download page. Click on the file to launch the installation.
+
+GUI: what does it look like?
+------------------------------
+
+|image| [fig:mongui]
+
+This is Monteverdi’s main window (figure  [fig:mongui]), where the
+different functionalities are reachable:
+
+-  1. Main menu
+
+-  2. Top toolbar
+
+-  3. Image displaying
+
+-  4. Right side dock
+
+-  5. Stack layer
+
+Main menu
+~~~~~~~~~
+
+The main menu is made up of four items. The main one is the File item,
+from which you can open an image, load the OTB applications, and
+finally quit Monteverdi. The Edit item lets the user change his/her
+preferences. The View item lets the user display or hide different
+parts of the main window. Finally, the Help item shows the ’About’
+information of the software, and can also display a useful keymap.
+
+Top toolbar
+~~~~~~~~~~~
+
+The top toolbar is made up of ten icons; from left to right:
+
+-  1st : open one or more image(s)
+
+-  2nd : zoom in
+
+-  3rd : zoom out
+
+-  4th : zoom to full extent
+
+-  5th : zoom to layer extent
+
+-  6th : zoom to full resolution
+
+-  7th : gives/changes the current projection, used as reference of the
+   view
+
+-  8th : selects the effect to be applied to the selected layer :
+   chessboard, local contrast, local translucency, normal, spectral
+   angle, swipe (horizontal and vertical)
+
+-  9th : a parameter used for the following effects : chessboard, local
+   contrast, local translucency, spectral angle
+
+-  10th : a parameter used for the following effects : local contrast,
+   spectral angle
+
+Image displaying
+~~~~~~~~~~~~~~~~
+
+This part of the main window is intended to display the images loaded by
+the user. There are many nice keyboard shortcuts or mouse tricks that
+let the user have a better experience when navigating through the
+loaded images. These shortcuts and tricks are given within the Help item
+of the main menu, by clicking Keymap; here is a short list of the most
+useful ones:
+
+The classical ones:
+
+-  CTRL+O = Open file(s)
+
+-  CTRL+Q = Quit application
+
+In the image displaying part:
+
+-  Mouse drag = Scroll view
+
+-  CTRL+Mouse drag = Quick scroll view (rendering is done after releasing
+   the CTRL key)
+
+-  CTRL+Mouse wheel = Zoom in/out
+
+-  + or - = Zoom in/out
+
+In the layer stack part:
+
+-  SHIFT+Page Up = Move layer to top of stack
+
+-  SHIFT+Page Down = Move layer to bottom of stack
+
+-  Delete = Delete selected layer
+
+-  SHIFT+Delete = Delete all layers
+
+Right side dock
+~~~~~~~~~~~~~~~
+
+The dock on the right side is divided into four tabs :
+
+-  Quicklook : gives the user a degraded view of the whole extent,
+   letting him/her easily select the area to be displayed
+
+-  Histogram : gives the user information about the value distribution
+   of the selected channels. By clicking the mouse’s left button, the
+   user can sample their values.
+
+-  Color Setup : lets the user map the image channels to the RGB
+   channels. Also lets him/her set the alpha parameter (translucency).
+
+-  Color dynamics : lets the user change the display dynamics of a
+   selected image. For each RGB channel (each mapped to an image
+   channel), the user can decide how the pixel range of the selected
+   image will be clipped before being rescaled to 0-255 : either by
+   setting the extremal values, or by setting the extremal quantiles.
+
+Each tab is shown in the figures below ( [fig:quickhisto] and
+ [fig:colorsetdyn]).
+
+|image| [fig:quickhisto]
+
+|image| [fig:colorsetdyn]
+
+Layer stack
+~~~~~~~~~~~
+
+The layer stack is made up of one list of layers located beneath six
+icons. The list of layers gives the user some information about the
+loaded images: projection, resolution (if available), name, and effect
+applied to the images (see top toolbar subsection). If the user moves
+the mouse over the displayed images, they will get more information:
+
+-  (i,j) : pixel index
+
+-  (Red Green Blue) : original image pixel values from the channels
+   mapped to the RGB ones.
+
+-  (X,Y) : pixel position
+
+Concerning the six icons, from left to right:
+
+-  1st : moves the selected layer to the top of the stack
+
+-  2nd : moves the selected layer up within the stack
+
+-  3rd : moves the selected layer down within the stack
+
+-  4th : moves the selected layer to the bottom of the stack
+
+-  5th : uses the selected layer as the projection reference
+
+-  6th : applies all display settings (color-setup, color-dynamics,
+   shader and so forth) of the selected layer to all other layers
+
+The layer stack is represented in the figure below ( [fig:layerstack]) :
+
+|image| [fig:layerstack]
+
+Examples
+--------
+
+With Monteverdi, it is also possible to interactively load
+OTB applications and use them to process images. For that purpose, the
+user just has to load the OTB applications by clicking on the Main menu,
+File/Load OTB-Applications (or simply by using the shortcut CTRL+A). The
+figure below ( [fig:applications]) shows the OTB applications loading
+window. The applications are arranged by thematic functionality; the
+user can also quickly find the wanted application by typing its name in
+the dedicated field at the top of the loading window.
+
+|image| [fig:applications]
+
+Optical calibration
+~~~~~~~~~~~~~~~~~~~
+
+In order to perform an optical calibration, launch the Optical
+calibration application (shortcut CTRL+A). We are going to use this
+application to perform a TOA (Top Of Atmosphere) conversion, which
+consists in converting the DN pixel values into spectral radiance (in
+W/m2/steradian/micrometer). Once the application is launched, the user
+must fill in the required fields (in, out, gainbias.txt -gain and bias
+values in a text file-, solarillumination.txt -solar illumination values
+in watt/m2/micron for each band in a text file-, and so on... refer to
+the documentation of the application).
+
+-  Note : if OTB (on which Monteverdi is based) is able to parse the
+   metadata of the image to be calibrated, then some of the fields will
+   be automatically filled in.
+
+In the figure below ( [fig:OC]), by taking a look at the layer stack,
+one can notice that the values of the calibrated image are now expressed
+in spectral radiance.
+
+|image| [fig:OC]
+
+BandMath
+~~~~~~~~
+
+The BandMath application is intended to apply mathematical operations
+to pixels (launch it with the shortcut CTRL+A). In this example, we are
+going to use this application to change the dynamics of an image, and
+check the result by looking at the Histogram tab in the right side dock.
+The formula used is the following : :math:`\text{im1b1} \times 1000`. In
+the figures below ( [fig:BM]), one can notice that the mode of the
+distribution is located at position :math:`356.0935`, whereas in the
+transformed image the mode is located at position :math:`354737.1454`,
+that is to say approximately 1000 times higher (the cursors aren’t
+placed at exactly the same position in the screenshots).
+
+|image| [fig:BM]
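+
+The same rescaling can also be reproduced outside the GUI with the
+BandMath application. The following is only a minimal sketch through the
+OTB applications Python interface (presented later in this Cookbook);
+the il, exp and out parameter keys are those used in the BandMath
+command-line examples of this Cookbook, and the file names are
+placeholders to be replaced by your own data:
+
+::
+
+    import otbApplication
+
+    # Create the BandMath application (the same one as used in the GUI)
+    app = otbApplication.Registry.CreateApplication("BandMath")
+
+    # "il" expects a list of input images; here a single placeholder input
+    app.SetParameterStringList("il", ["input_image.tif"])
+
+    # The formula used in this example: multiply the first band by 1000
+    app.SetParameterString("exp", "im1b1 * 1000")
+
+    # Placeholder output file name
+    app.SetParameterString("out", "rescaled_image.tif")
+
+    app.ExecuteAndWriteOutput()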
+
+Segmentation
+~~~~~~~~~~~~
+
+Now, let’s use the Segmentation application (launch it with the
+shortcut CTRL+A). We let the user take a look at the application’s
+documentation; let’s simply say that, since we wish to display the
+segmentation with Monteverdi, we must tell the application to output the
+segmentation in raster format. Thus, the value of the mode option must
+be set to raster. The following figure ( [fig:seg12]) shows the original
+image and the labels image.
+
+|image| [fig:seg12]
+
+Gray colors aren’t very convenient for visualizing a segmentation.
+That’s why we are going to use another application, the ColorMapping one
+(launch it with the shortcut CTRL+A as usual). There are many ways to
+use this application (see the documentation for more details). We wish
+to colour the segmentation so that the color difference between
+adjacent regions is maximized. For this purpose, we can use the optimal
+method (set the value of the method option to optimal). The figure below
+( [fig:seg3]) shows the result of such a colorization.
+
+|image| [fig:seg3]
+
+Now it would be nice to superimpose this colorization on the original
+image to assess the quality of the segmentation. Monteverdi provides the
+user with a very simple way to do it. Once the two images are loaded in
+Monteverdi and the original image is placed at the top of the stack, the
+user just has to select the translucency layer effect and set the size
+of the exploration circle as convenient. The figure below ( [fig:seg4])
+shows the result of such a superimposition. We encourage the reader to
+test the other layer effects.
+
+|image| [fig:seg4]
+
+Polarimetry
+~~~~~~~~~~~
+
+In this example, we are going to use three applications :
+
+-  the first one is SARDecompositions. This application is used to
+   compute the HaA decomposition. It takes as inputs three complex
+   channels from the bands HH, HV and VV.
+
+-  the second one is SplitImage. Indeed, the previous application
+   produces an output image made up of three channels, H, a and A, and
+   we wish to focus on the H parameter (entropy). So we let this
+   application split this image into three one-band images.
+
+-  the last one is ColorMapping. The entropy image has values ranging
+   from 0 to 1, and they can be easily displayed by Monteverdi. But
+   since we have a nice visualization tool in hand, we wish to go a
+   little bit further. Here comes the ColorMapping application. It is
+   going to be used with the following parameter settings (a scripted
+   equivalent is sketched after the figure below):
+
+   -  method = continuous. This parameter tells the application to use
+      a gradient of colors to represent the entropy image.
+
+   -  method.continuous.lut = hot. We specify here the kind of gradient
+      to be used : low values in black, high ones in white, and
+      intermediate ones in red/orange/yellow...
+
+   -  method.continuous.min = 0 and method.continuous.max = 1. Here, the
+      gradient of colors must be adjusted to the dynamics of the entropy
+      image (note: it is theoretically known that in the HaA
+      decomposition, H ranges from 0 to 1; generally speaking, the
+      Histogram tab of Monteverdi can also be used for this purpose).
+
+In the figure below ( [fig:pol1]), we show the obtained result, with the
+local contrast layer effect.
+
+|image| [fig:pol1]
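+
+For the record, these ColorMapping settings can also be applied outside
+the GUI. Here is a minimal sketch through the Python interface, using
+the method.continuous.* keys listed above; the in and out file names are
+placeholders, and the input is assumed to be the one-band entropy image
+produced by SplitImage:
+
+::
+
+    import otbApplication
+
+    # ColorMapping with the continuous "hot" look-up table described above
+    app = otbApplication.Registry.CreateApplication("ColorMapping")
+
+    app.SetParameterString("in", "entropy_band.tif")    # placeholder H (entropy) band
+    app.SetParameterString("out", "entropy_hot.tif")    # placeholder colored output
+    app.SetParameterString("method", "continuous")
+    app.SetParameterString("method.continuous.lut", "hot")
+    app.SetParameterFloat("method.continuous.min", 0.0)
+    app.SetParameterFloat("method.continuous.max", 1.0)
+
+    app.ExecuteAndWriteOutput()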
+
+Pansharpening
+~~~~~~~~~~~~~
+
+Finally, let’s try a last example with the Pansharpening application
+(launch it with the shortcut CTRL+A). The fields are quite easy to fill
+in: this application needs a panchromatic image, an XS image, and an
+output image. These images are represented in the figures below
+( [fig:ps12] and  [fig:ps3]):
+
+|image| [fig:ps12]
+
+|image| [fig:ps3]
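+
+For reference, a scripted equivalent is sketched below through the
+Python interface. The inp and inxs parameter keys for the panchromatic
+and XS inputs are assumptions to be checked against the application
+reference documentation, and the file names are placeholders:
+
+::
+
+    import otbApplication
+
+    app = otbApplication.Registry.CreateApplication("Pansharpening")
+
+    # Assumed parameter keys: inp (panchromatic), inxs (multispectral), out
+    app.SetParameterString("inp", "pan_image.tif")
+    app.SetParameterString("inxs", "xs_image.tif")
+    app.SetParameterString("out", "pansharpened_image.tif")
+
+    app.ExecuteAndWriteOutput()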
+
+Now, in order to inspect the result properly, these three images are
+loaded in Monteverdi. The pansharpened image is placed at the top of the
+layer stack, and different layer effects are applied to it :
+
+-  in figure  [fig:ps4] : chessboard effect, to compare the result with
+   the XS image.
+
+-  in figure  [fig:ps5] : translucency effect, to compare the result
+   with the panchromatic image.
+
+|image| [fig:ps4]
+
+|image| [fig:ps5]
+
+Conclusion
+~~~~~~~~~~
+
+The images used in this documentation can be found in the OTB-Data
+repository (https://git.orfeo-toolbox.org/otb-data.git):
+
+-  in OTB-Data/Input :
+
+   -  QB\_TOULOUSE\_MUL\_Extract\_500\_500.tif and
+      QB\_Toulouse\_Ortho\_XS\_ROI\_170x230.tif (GUI presentation)
+
+   -  RSAT\_imagery\_HH.tif RSAT\_imagery\_HV.tif RSAT\_imagery\_VV.tif
+      (polarimetry example)
+
+   -  QB\_Toulouse\_Ortho\_PAN.tif QB\_Toulouse\_Ortho\_XS.tif
+      (pansharpening example)
+
+-  in OTB-Data/Input/mv2-test : QB\_1\_ortho.tif
+
+.. |image| image:: ../Art/MonteverdiImages/gui.png
+.. |image| image:: ../Art/MonteverdiImages/quickhisto.png
+.. |image| image:: ../Art/MonteverdiImages/colsetdyn.png
+.. |image| image:: ../Art/MonteverdiImages/layerstack.png
+.. |image| image:: ../Art/MonteverdiImages/applications.png
+.. |image| image:: ../Art/MonteverdiImages/OC.png
+.. |image| image:: ../Art/MonteverdiImages/BM.png
+.. |image| image:: ../Art/MonteverdiImages/seg1-2.png
+.. |image| image:: ../Art/MonteverdiImages/seg3.png
+.. |image| image:: ../Art/MonteverdiImages/seg4.png
+.. |image| image:: ../Art/MonteverdiImages/pol1.png
+.. |image| image:: ../Art/MonteverdiImages/ps1-2.png
+.. |image| image:: ../Art/MonteverdiImages/ps3.png
+.. |image| image:: ../Art/MonteverdiImages/ps4.png
+.. |image| image:: ../Art/MonteverdiImages/ps5.png
diff --git a/Documentation/Cookbook/rst/OTB-Applications.rst b/Documentation/Cookbook/rst/OTB-Applications.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fb78ed7743fabc8281fd2c34cd175e73c4414d01
--- /dev/null
+++ b/Documentation/Cookbook/rst/OTB-Applications.rst
@@ -0,0 +1,542 @@
+A brief tour of OTB-Applications
+================================
+
+Introduction
+------------
+
+**OTB Applications** was perhaps the oldest package of the **Orfeo
+Toolbox** suite after the OTB package itself. Since the **Orfeo
+Toolbox** is a library providing remote sensing functionalities, the
+only applications that were distributed at the beginning were the
+examples from the Software Guide and the tests. These applications are
+very useful for the developer because their code is very short and only
+demonstrates one functionality at a time. In many cases, a real
+application would require :
+
+-  combining together two or more functions from the **Orfeo Toolbox**
+
+-  providing a nice high level interface to handle : parameters, input
+   data, output data and communication with the user
+
+The **OTB Applications** package was originally designed to provide
+applications performing simple remote sensing tasks, more complex than
+simple examples from the Software Guide, and with a more user-friendly
+interface (either graphical or command-line), to demonstrate the use of
+the **Orfeo Toolbox** functions. The most popular applications are maybe
+the *otbImageViewerManager*, which allows opening a collection of
+images and navigating in them, and the
+*otbSupervisedClassificationApplication*, which allowed delineating
+training regions of interest on the image and classifying the image with
+an SVM classifier trained with these regions (this application is no
+longer maintained since the same functionality is available through the
+corresponding **Monteverdi** module). During the first 3 years of the
+**Orfeo Toolbox** development, many more applications have been added to
+this package, to perform various tasks. Most of them came with a
+graphical user interface, apart from some small utilities that are
+command-line.
+
+The development and release of the **Monteverdi** software (see
+chapter [chap:Monteverdi]) at the end of year 2009 changed a lot of
+things for the **OTB Applications** package: most of non-developer users
+were looking for quite a long time for an application providing **Orfeo
+Toolbox** functionalities under a unified graphical interface. Many
+applications from the **OTB Applications** package were integrated to
+**Monteverdi** as modules, and the **OTB Applications** package lost a
+lot of its usefulness. No more applications were added to the package
+and it was barely maintained, as new graphical tools were directly
+embedded within **Monteverdi** .
+
+Then, some people started to regain interest in the **OTB Applications**
+package. **Monteverdi** is a great tool to perform numerous remote
+sensing and image processing tasks in a minute, but it is not well
+adapted to heavier (and longer) processing, scripting and batch
+processing. Therefore, in 2010 the **OTB Applications** package has been
+revamped: old applications have been moved to a legacy folder for
+backward compatibility, and the development team started to populate the
+package with compact command-line tools to perform various heavy
+processing tasks.
+
+Later on, in 2011, the **OTB Applications** package was further revamped.
+Because of the increasing need to interface the **OTB Applications**
+with other software and to provide auto-generated interfaces, the
+**Orfeo Toolbox** development team decided to develop a new application
+framework. The main idea of this framework is the following: each
+application is written once and for all in a shared library (also known
+as a plugin). This plugin can be auto-loaded into appropriate tools
+without recompiling, and is able to fully describe its parameters,
+behaviour and documentation.
+
+The tools to use the plugins can be extended, but **Orfeo Toolbox**
+ships the following:
+
+-  A command-line launcher, which is almost equivalent to the former
+   **OTB Applications** command-line interface,
+
+-  A graphical launcher, with an auto-generated Qt interface, providing
+   ergonomic parameter setting, display of documentation, and progress
+   reporting,
+
+-  A SWIG interface, which means that any application can be loaded,
+   set up and executed from a high-level language such as Python or
+   Java.
+
+Additionally, `QGis <http://www.qgis.org/>`_  plugins built on top of
+the SWIG/Python interface are available with seamless integration within
+QGis. You can find a short guide about it
+`here <http://wiki.orfeo-toolbox.org/index.php/Quantum_GIS_access_to_OTB_applications>`_ .
+
+To facilitate the use of these tools and applications, they are now
+shipped with the standard **Orfeo Toolbox** package. It means that the
+former **OTB-Applications** package has entered its maintenance cycle:
+no new feature will be pushed there, and all development is done
+directly inside the **Orfeo Toolbox** package.
+
+The **OTB Applications** now comprise more than 40 tools, which are
+listed in the applications reference documentation, presented in
+chapter [chap:apprefdoc], page .
+
+Installation
+------------
+
+We provide different binary packages for OTB-Applications:
+
+-  for Windows platform (XP/Seven) through OsGeo4W installer (32/64bit)
+
+-  for Ubuntu 12.04 and higher
+
+-  for OpenSuse 12.X and higher
+
+-  for MacOSX through MacPorts software
+
+If you want to build from source or if we don’t provide packages for
+your system, some information is available in the `OTB Software
+Guide <http://orfeo-toolbox.org/SoftwareGuide>`_ , in the section
+**Building from Source**.
+
+Windows XP/Seven
+~~~~~~~~~~~~~~~~
+
+Since version 3.12, we provide OTB Applications packages through OSGeo4W
+for Windows XP/Seven users:
+
+-  **otb-bin** for command line and QT applications
+
+-  **otb-python** for python applications
+
+Follow the instructions in the installer and select the packages you
+want to add. The installer will proceed with the installation of the
+selected packages and all their dependencies. The **otb-bin** package
+applications will be available directly in the OSGeo4W shell; for
+example, run:
+
+::
+
+    otbgui_BandMath
+
+For the **otb-python** packages, you can simply check from an OSGeo4W
+shell the list of available applications:
+
+::
+
+    python
+    import otbApplication
+    print str( otbApplication.Registry.GetAvailableApplications() )
+
+MacOS X
+~~~~~~~
+
+OTB Applications are now available on
+`MacPorts <http://www.macports.org/>`_ . The port is called
+orfeotoolbox. You can follow the `MacPorts
+documentation <http://guide.macports.org/>`_  to install MacPorts
+first, then install the orfeotoolbox port. After the installation, you
+can use the OTB applications directly on your system.
+
+Ubuntu 12.04 and higher
+~~~~~~~~~~~~~~~~~~~~~~~
+
+For Ubuntu 12.04 and higher, OTB Applications packages may be available
+as Debian packages through APT repositories:
+
+-  **otb-bin** for command line applications
+
+-  **otb-bin-qt** for Qt applications
+
+-  **python-otb** for python applications
+
+Since release 3.14.1, OTB Applications packages are available in the
+`ubuntugis-unstable <https://launchpad.net/~ubuntugis/+archive/ubuntugis-unstable>`_ 
+repository.
+
+You can add it by using these command-lines:
+
+::
+
+    sudo aptitude install add-apt-repository
+    sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable
+
+Then you can run:
+
+::
+
+    sudo aptitude install otb-bin otb-bin-qt python-otb
+
+If you are using *Synaptic*, you can add the repositories, update and
+install the packages through the graphical interface.
+
+For further information about Ubuntu packages, go to the
+`ubuntugis-unstable <https://launchpad.net/~ubuntugis/+archive/ubuntugis-unstable>`_ 
+launchpad page and click on **Read about installing**.
+
+**apt-add-repository** will try to retrieve the GPG keys of the
+repositories to certify the origin of the packages. If you are behind an
+HTTP proxy, this step won’t work and apt-add-repository will stall and
+eventually quit. You can temporarily ignore this error and proceed with
+the update step. Following this, aptitude update will issue a warning
+about a signature problem. This warning won’t prevent you from
+installing the packages.
+
+OpenSuse 12.X and higher
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+For OpenSuse 12.X and higher, OTB Applications packages are available
+through *zypper*.
+
+First, you need to add the appropriate repositories with these
+command lines (please replace 11.4 with your OpenSuse version):
+
+::
+
+    sudo zypper ar http://download.opensuse.org/repositories/games/openSUSE_11.4/ Games
+    sudo zypper ar http://download.opensuse.org/repositories/Application:/Geo/openSUSE_11.4/ GEO
+    sudo zypper ar http://download.opensuse.org/repositories/home:/tzotsos/openSUSE_11.4/ tzotsos
+
+Now run:
+
+::
+
+    sudo zypper refresh
+    sudo zypper install OrfeoToolbox
+    sudo zypper install OrfeoToolbox-python
+
+Alternatively you can use the One-Click Installer from the `openSUSE
+Download
+page <http://software.opensuse.org/search?q=Orfeo&baseproject=openSUSE%3A11.4&lang=en&include_home=true&exclude_debug=true>`_ 
+or add the above repositories and install through Yast Package
+Management.
+
+There is also support for the recently introduced ’rolling’ openSUSE
+distribution named ’Tumbleweed’. For Tumbleweed you need to add the
+following repositories with these command-lines:
+
+::
+
+    sudo zypper ar http://download.opensuse.org/repositories/games/openSUSE_Tumbleweed/ Games
+    sudo zypper ar http://download.opensuse.org/repositories/Application:/Geo/openSUSE_Tumbleweed/ GEO
+    sudo zypper ar http://download.opensuse.org/repositories/home:/tzotsos/openSUSE_Tumbleweed/ tzotsos
+
+and then add the OTB packages as shown above.
+
+Using the applications
+----------------------
+
+Using the new **OTB Applications** framework is slightly more complex
+than launching a command-line tool. This section describes all the ways
+to launch the new applications. Apart from the simplified access, which
+is similar to the former access to **OTB Applications**, you will need
+to know the application name and optionally the path where the
+application plugins are stored. For applications shipped with **Orfeo
+Toolbox**, the name of each application can be found in
+chapter [chap:apprefdoc], page .
+
+Simplified use
+~~~~~~~~~~~~~~
+
+All standard applications delivered with **Orfeo Toolbox** come with
+simplified scripts in the system path, allowing you to launch the
+command-line and graphical user interface versions of an application in
+the same simple way the old applications used to be launched. The
+command-line interface is prefixed by ``otbcli_``, while the Qt
+interface is prefixed by ``otbgui_``. For instance, calling
+``otbcli_Convert`` will launch the command-line interface of the
+**Convert** application, while ``otbgui_Convert`` will launch its GUI.
+
+Passing arguments to the command-line version (prefixed by ``otbcli_``)
+is explained in the next sub-section.
+
+Using the command-line launcher
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The command-line application launcher allows you to load an application
+plugin, set its parameters, and execute it from the command line.
+Launching ``otbApplicationLauncherCommandLine`` without arguments
+displays the following help:
+
+::
+
+    $ otbApplicationLauncherCommandLine 
+    Usage : ./otbApplicationLauncherCommandLine module_name [MODULEPATH] [arguments]
+
+The ``module_name`` parameter corresponds to the application name. The
+``[MODULEPATH]`` argument is optional and allows you to pass the
+launcher a path where the shared library (or plugin) corresponding to
+``module_name`` is located.
+
+It is also possible to set this path with the environment variable
+``ITK_AUTOLOAD_PATH``, making the ``[MODULEPATH]`` optional. This
+variable is checked by default when no ``[MODULEPATH]`` argument is
+given. When using multiple paths in ``ITK_AUTOLOAD_PATH``, one must make
+sure to use the standard path separator of the target system, which is
+``:`` on Unix, and ``;`` on Windows.
+
+An error in the application name (i.e. in the ``module_name`` parameter)
+will make ``otbApplicationLauncherCommandLine`` list the names of
+all applications found in the available paths (either ``[MODULEPATH]``
+and/or ``ITK_AUTOLOAD_PATH``).
+
+To ease the use of the applications, and to avoid extensive
+environment customization, ready-to-use scripts are provided by the OTB
+installation to launch each application; they take care of adding the
+standard application installation path to the ``ITK_AUTOLOAD_PATH``
+environment variable.
+
+These scripts are named ``otbcli_<ApplicationName>`` and do not need any
+path setting. For example, you can start the OrthoRectification
+application with the script called ``otbcli_OrthoRectification``.
+
+Launching an application with no or incomplete parameters will make the
+launcher display a summary of the parameters, indicating the mandatory
+parameters that are missing for the application to execute. Here is an
+example with the **OrthoRectification** application:
+
+::
+
+    $ otbcli_OrthoRectification
+
+    ERROR: Waiting for at least one parameter...
+
+    ====================== HELP CONTEXT ======================
+    NAME: OrthoRectification
+    DESCRIPTION: This application allows to ortho-rectify optical images from supported sensors.
+
+    EXAMPLE OF USE: 
+    otbcli_OrthoRectification -io.in QB_TOULOUSE_MUL_Extract_500_500.tif -io.out QB_Toulouse_ortho.tif
+
+    DOCUMENTATION: http://www.orfeo-toolbox.org/Applications/OrthoRectification.html
+    ======================= PARAMETERS =======================
+            -progress                        <boolean>        Report progress 
+    MISSING -io.in                           <string>         Input Image 
+    MISSING -io.out                          <string> [pixel] Output Image  [pixel=uint8/int8/uint16/int16/uint32/int32/float/double]
+            -map                             <string>         Output Map Projection [utm/lambert2/lambert93/transmercator/wgs/epsg]
+    MISSING -map.utm.zone                    <int32>          Zone number 
+            -map.utm.northhem                <boolean>        Northern Hemisphere 
+            -map.transmercator.falseeasting  <float>          False easting 
+            -map.transmercator.falsenorthing <float>          False northing 
+            -map.transmercator.scale         <float>          Scale factor 
+            -map.epsg.code                   <int32>          EPSG Code 
+            -outputs.mode                    <string>         Parameters estimation modes [auto/autosize/autospacing]
+    MISSING -outputs.ulx                     <float>          Upper Left X 
+    MISSING -outputs.uly                     <float>          Upper Left Y 
+    MISSING -outputs.sizex                   <int32>          Size X 
+    MISSING -outputs.sizey                   <int32>          Size Y 
+    MISSING -outputs.spacingx                <float>          Pixel Size X 
+    MISSING -outputs.spacingy                <float>          Pixel Size Y 
+            -outputs.isotropic               <boolean>        Force isotropic spacing by default 
+            -elev.dem                        <string>         DEM directory 
+            -elev.geoid                      <string>         Geoid File 
+            -elev.default                    <float>          Average Elevation 
+            -interpolator                    <string>         Interpolation [nn/linear/bco]
+            -interpolator.bco.radius         <int32>          Radius for bicubic interpolation 
+            -opt.rpc                         <int32>          RPC modeling (points per axis) 
+            -opt.ram                         <int32>          Available memory for processing (in MB) 
+            -opt.gridspacing                 <float>          Resampling grid spacing 
+
+For a detailed description of the application behaviour and parameters,
+please check the application reference documentation presented in
+chapter [chap:apprefdoc], page , or follow the ``DOCUMENTATION``
+hyperlink provided in the ``otbApplicationLauncherCommandLine`` output.
+Parameters are passed to the application using the parameter key (which
+might include one or several ``.`` characters), prefixed by a ``-``.
+Command-line examples are provided in chapter [chap:apprefdoc], page .
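+
+The same parameter keys are used verbatim in the Python interface
+described later in this chapter. Here is a minimal sketch mirroring the
+EXAMPLE OF USE shown above (the file names are those of the example and
+should be replaced by your own data):
+
+::
+
+    import otbApplication
+
+    app = otbApplication.Registry.CreateApplication("OrthoRectification")
+
+    # Keys are taken from the parameter summary above
+    app.SetParameterString("io.in", "QB_TOULOUSE_MUL_Extract_500_500.tif")
+    app.SetParameterString("io.out", "QB_Toulouse_ortho.tif")
+
+    # Other parameters are left to the application's default behaviour
+    app.ExecuteAndWriteOutput()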
+
+Using the GUI launcher
+~~~~~~~~~~~~~~~~~~~~~~
+
+The graphical interface for the applications provides a useful
+interactive user interface to set the parameters, choose files, and
+monitor the execution progress.
+
+This interface can be activated at build time through a dedicated CMake
+option.
+
+This launcher needs the same two arguments as the command line
+launcher:
+
+::
+
+    $ otbApplicationLauncherQt module_name [MODULEPATH]
+
+The application paths can be set with the ``ITK_AUTOLOAD_PATH``
+environment variable, as for the command line launcher. Also, as for the
+command-line application, a simpler script is generated and
+installed by OTB to ease the configuration of the module path: to
+launch the *Rescale* graphical user interface, one starts the
+``otbgui_Rescale`` script.
+
+The resulting graphical application displays a window with several tabs:
+
+-  **Parameters** is where you set the parameters and execute the
+   application.
+
+-  **Logs** is where you see the information given by the application
+   during its execution.
+
+-  **Progress** is where you see a progress bar of the execution (not
+   available for all applications).
+
+-  **Documentation** is where you find a summary of the application
+   documentation.
+
+In this interface, every optional parameter has a check box that you
+have to tick if you want to set a value and use this parameter. The
+mandatory parameters cannot be unchecked.
+
+The interface of the application *Rescale* is shown here as an example.
+
+|image1| [fig:rescaleParam]
+
+|image2| [fig:rescaleLogs]
+
+|image3| [fig:rescaleProgress]
+
+|image4| [fig:rescaleDocumentation]
+
+Using the Python interface
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The applications can also be accessed from Python, through a module
+named ``otbApplication``.
+
+On Unix systems it is typically available in the ``/usr/lib/otb/python``
+directory. You may need to configure the ``PYTHONPATH`` environment
+variable to include this directory so that the module becomes
+available from a Python shell.
+
+On Windows, you can install the ``otb-python`` package, and the module
+will be available from an OSGeo4W shell automatically.
+
+In this module, two main classes can be manipulated:
+
+-  ``Registry``, which provides access to the list of available
+   applications, and can create applications
+
+-  ``Application``, the base class for all applications. This class
+   allows you to interact with an application instance created by the
+   ``Registry``
+
+As for the command line and GUI launchers, the path to the application
+modules needs to be properly set with the ``ITK_AUTOLOAD_PATH``
+environment variable. The standard location on Unix systems is
+``/usr/lib/otb/applications``. On Windows, the applications are
+available in the ``otb-bin`` OSGeo4W package, and the environment is
+configured automatically so you don’t need to tweak
+``ITK_AUTOLOAD_PATH``.
+
+Here is one example of how to use Python to run the ``Smoothing``
+application, changing the algorithm at each iteration.
+
+::
+
+    #  Example on the use of the Smoothing application
+    #
+
+    # We will use sys.argv to retrieve arguments from the command line.
+    # Here, the script will accept an image file as first argument,
+    # and the basename of the output files, without extension.
+    from sys import argv
+
+    # The python module providing access to OTB applications is otbApplication
+    import otbApplication
+
+    # otbApplication.Registry can tell you what applications are available
+    print "Available applications : "
+    print str( otbApplication.Registry.GetAvailableApplications() )
+
+    # Let's create the application with codename "Smoothing"
+    app = otbApplication.Registry.CreateApplication("Smoothing")
+
+    # We print the keys of all its parameters
+    print app.GetParametersKeys()
+
+    # First, we set the input image filename
+    app.SetParameterString("in", argv[1])
+
+    # The smoothing algorithm can be set with the "type" parameter key
+    # and can take 3 values : 'mean', 'gaussian', 'anidif'
+    for type in ['mean', 'gaussian', 'anidif']:
+
+      print 'Running with ' + type + ' smoothing type'
+
+      # Here we configure the smoothing algorithm
+      app.SetParameterString("type", type)
+
+      # Set the output filename, using the algorithm name to differentiate the outputs
+      app.SetParameterString("out", argv[2] + type + ".tif")
+
+      # This will execute the application and save the output file
+      app.ExecuteAndWriteOutput()
+
+Load/Save OTB-Applications parameters from/to file
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since OTB 3.20, OTB application parameters can be exported to and
+imported from an XML file using the inxml/outxml parameters. Those
+parameters are available in all applications.
+
+An example is worth a thousand words:
+
+::
+
+    otbcli_BandMath -il input_image_1 input_image_2
+                    -exp "abs(im1b1 - im2b1)"
+                    -out output_image
+                    -outxml saved_applications_parameters.xml
+
+Then, you can run the application with the same parameters using the
+previously saved XML file. For this, you have to use the inxml
+parameter:
+
+::
+
+    otbcli_BandMath -inxml saved_applications_parameters.xml
+
+Note that you can also override parameters from the command line at the
+same time:
+
+::
+
+    otbcli_BandMath -inxml saved_applications_parameters.xml 
+                    -exp "(im1b1 - im2b1)"
+
+In this case it will use the mathematical expression “(im1b1 - im2b1)”
+instead of “abs(im1b1 - im2b1)”.
+
+Finally, you can also launch applications directly from the command-line
+launcher executable using the inxml parameter, without having to declare
+the application name. In this case, use:
+
+::
+
+    otbApplicationLauncherCommandLine -inxml saved_applications_parameters.xml
+
+It will retrieve the application name and related parameters from the
+input XML file and launch, in this case, the BandMath application.
+
+.. |image1| image:: ./Art/QtImages/rescale_param.png
+.. |image2| image:: ./Art/QtImages/rescale_logs.png
+.. |image3| image:: ./Art/QtImages/rescale_progress.png
+.. |image4| image:: ./Art/QtImages/rescale_documentation.png
diff --git a/Documentation/Cookbook/rst/README.md b/Documentation/Cookbook/rst/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8b5fa5c0dc84109230c72929eb07602b7337dc93
--- /dev/null
+++ b/Documentation/Cookbook/rst/README.md
@@ -0,0 +1,46 @@
+rstdocs
+=======
+
+RST docs for Orfeo Toolbox CookBook
+
+This file is named README.md even though the syntax is rst and not markdown. The motive is to prevent it from being included in the Cookbook.
+
+Introduction
+============
+
+This is an alternative/replacement for the current OTB Cookbook, which is written in LaTeX. This version deviates completely from the existing LaTeX format to the reStructuredText (rst) format.
+The home page of rst says reStructuredText is an easy-to-read, what-you-see-is-what-you-get plaintext markup syntax and parser system. Indeed, every bit of that is true in our experience.
+You can find more about rst and its syntax here: http://docutils.sourceforge.net/rst.html. Using sphinx build tools, rst can be converted to formats including but not limited to html, pdf and latex.
+
+Converting the existing latex to rst is not that straightforward. All rst files for OTB applications are generated using the python script otbGenerateWrappersRstDoc.py.
+For the others in recipes, we used a tool called pandoc to get an initial rst and then edited out the errors manually. You do not have to generate them again.
+
+
+HowTo generate OTB CookBook in RST
+----------------------------------
+i) Clone the OTB-Documents repository
+
+cd $HOME/sources
+
+git clone https://github.com/CS-SI/OTB-Documents
+
+ii) Run cmake to configure the cookbook build
+
+mkdir ~/build-cookbook
+
+cd ~/build-cookbook
+
+cmake ~/sources/OTB-Documents/CookBook -DOTB_DIR=/path/where/you/installed/otb/lib/cmake/OTB-5.0
+
+iii) Build the Cookbook in RST
+
+make
+
+iv) View the results
+
+Open the cookbook documentation in a firefox tab:
+
+firefox ~/build-cookbook/rst/_build/html/index.html
+
+
+TODO: include Applications/app_* files if they exist
\ No newline at end of file
diff --git a/Documentation/Cookbook/rst/Recipes.rst b/Documentation/Cookbook/rst/Recipes.rst
new file mode 100644
index 0000000000000000000000000000000000000000..17d7c68fc834672961ac35ab34b4111429aeb52c
--- /dev/null
+++ b/Documentation/Cookbook/rst/Recipes.rst
@@ -0,0 +1,16 @@
+Recipes
+=======
+
+.. toctree::
+   :maxdepth: 6
+
+
+   recipes/pleiades.rst
+   recipes/optpreproc.rst
+   recipes/residual_registration.rst
+   recipes/improc.rst
+   recipes/pbclassif.rst
+   recipes/featextract.rst
+   recipes/stereo.rst
+   recipes/bandmathx.rst
+   recipes/numpy.rst
diff --git a/Documentation/Cookbook/rst/bandmathx.rst b/Documentation/Cookbook/rst/bandmathx.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b5e39c8448e68778a94c509ab09670c5b3b64ba
--- /dev/null
+++ b/Documentation/Cookbook/rst/bandmathx.rst
@@ -0,0 +1,292 @@
+BandMathX application (based on muParserX)
+==========================================
+
+This section describes how to use the BandMathX application.
+
+Syntax : first elements
+-----------------------
+
+The default prefix name for variables related to the i-th input is
+*im(i+1)* (note the indexing from 1 to N, for N inputs). The user has
+the possibility to change this default behaviour by setting his/her own
+prefix.
+
+In this document, we will keep the default convention. The following
+table summarises the available variables for the first input (and so on
+for every input).
+
+.. list-table::
+   :header-rows: 1
+
+   * - Variables
+     - Description
+     - Type
+   * - im1
+     - a pixel from the first input, made of n components/bands (the first
+       image is indexed by 1)
+     - Vector
+   * - im1bj
+     - j-th component of a pixel from the first input (the first band is
+       indexed by 1)
+     - Scalar
+   * - im1bjNkxp
+     - a neighbourhood (“N”) of pixels of the j-th component from the first
+       input, of size kxp
+     - Matrix
+   * - im1bjMini
+     - global statistic : minimum of the j-th band from the first input
+     - Scalar
+   * - im1bjMaxi
+     - global statistic : maximum of the j-th band from the first input
+     - Scalar
+   * - im1bjMean
+     - global statistic : mean of the j-th band from the first input
+     - Scalar
+   * - im1bjSum
+     - global statistic : sum of the j-th band from the first input
+     - Scalar
+   * - im1bjVar
+     - global statistic : variance of the j-th band from the first input
+     - Scalar
+   * - im1PhyX and im1PhyY
+     - spacing of the first input in X and Y directions
+     - Scalar
+
+[variables]
+
+Moreover, we also have the generic variables idxX and idxY that
+represent the indices of the current pixel (scalars).
+
+For instance, the following formula (addition of two pixels)
+
+.. math:: im1+im2
+
+[firstequation]
+
+is correct only if the two first inputs have the same number of bands.
+In addition, the following formula is not consistent even if im1
+represents a pixel of an image made of only one band:
+
+.. math:: im1+1
+
+A scalar can’t be added to a vector. The right formula is instead (note
+the way muParserX allows vectors to be defined on the fly):
+.. math:: im1+\{ 1 \}
+
+or
+
+.. math:: im1 + \{1,1,1,...,1\}
+
+if im1 is made of n components.
+
+On the other hand, the variable im1b1 for instance is represented as a
+scalar; so we have the following different possibilities:
+
+.. list-table::
+   :header-rows: 1
+
+   * - Expression
+     - Status
+   * - im1b1 + 1
+     - correct
+   * - {im1b1} + {1}
+     - correct
+   * - im1b1 + {1}
+     - incorrect
+   * - {im1b1} + 1
+     - incorrect
+   * - im1 + {im2b1,im2b2}
+     - correct if im1 represents a pixel of two components (equivalent to
+       im1 + im2)
+
+[correctness]
+
+Similar remarks can be made for the multiplication/division; for
+instance, the following formula is incorrect:
+
+.. math:: \{im2b1,im2b2\} * \{1,2\}
+
+whereas this one is correct:
+
+.. math:: \{im2b1,im2b2\} * \{1,2\}'
+
+or in more simple terms (and only if im2 contains two components):
+
+.. math:: im2* \{1,2\}'
+
+Concerning division, this operation is not originally defined between
+two vectors (see the next section, “New operators and functions”
+[ssec:operators]).
+
+Now, let’s go back to the first formula: this one specifies the addition
+of two images band to band. With the muParserX library, we can now
+define such an operation with only one formula, instead of many formulas
+(as many as the number of bands). We call this new functionality the
+**batch mode**, which directly arises from the introduction of vectors
+within the muParserX framework.
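+
+As an illustration of this batch mode, here is a minimal sketch using
+the Python interface, assuming that the BandMathX application exposes
+the same il, out and exp parameter keys as the BandMath application
+(file names are placeholders):
+
+::
+
+    import otbApplication
+
+    app = otbApplication.Registry.CreateApplication("BandMathX")
+
+    # Two placeholder inputs, assumed to have the same number of bands
+    app.SetParameterStringList("il", ["image_1.tif", "image_2.tif"])
+
+    # Band-to-band addition of the two images in a single expression
+    app.SetParameterString("exp", "im1 + im2")
+
+    app.SetParameterString("out", "sum.tif")
+    app.ExecuteAndWriteOutput()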
+
+Finally, let’s say a few words about neighbourhood variables. These
+variables are defined for each particular input, and for each particular
+band. The last two numbers, kxp, indicate the size of the neighbourhood.
+All neighbourhoods are centred: this means that k and p can only be odd
+numbers. Moreover, k represents the dimension in the x direction (number
+of columns), and p the dimension in the y direction (number of rows).
+For instance, im1b3N3x5 represents the following neighbourhood:
+
++-----+-----+-----+
+| .   | .   | .   |
++=====+=====+=====+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+
+[correctness]
+
+Fundamentally, a neighbourhood is represented as a matrix inside the
+muParserX framework; so the remark about mathematically well-defined
+formulas still stands.
+
+New operators and functions
+---------------------------
+
+New operators and functions have been implemented within BandMathX
+application. These ones can be divided into two categories.
+
+-  adaptation of existing operators/functions that were not originally
+   defined for vectors and matrices (for instance cos, sin, ...). These
+   new operators/functions keep the original names, to which we add the
+   prefix “v” for vector (vcos, vsin, ...).
+
+-  truly new operators/functions.
+
+Concerning the last category, here is a list of implemented operators or
+functions (they are all implemented in otbParserXPlugins.h/.cxx files
+-OTB/Code/Common-):
+
+**Operators div and dv** The first operator allows the definition of an
+element-wise division of two vectors (and even matrices), provided that
+they have the same dimensions. The second one allows the definition of
+the division of a vector/matrix by a scalar (components are divided by
+the same unique value). For instance:
+
+.. math:: im1 ~ div ~ im2
+
+.. math:: im1 ~ dv ~ 2.0
+
+**Operators mult and mlt** These operators are the duals of the previous
+ones. For instance:
+
+.. math:: im1 ~  mult ~ im2
+
+.. math:: im1 ~  mlt ~ 2.0
+
+Note that the operator ’\*’ could have been used instead of the ’mult’
+one. But ’mult’ is a little bit more permissive, and can tolerate a
+one-dimensional vector as the right element.
+
+**Operators pow and pw** The first operator allows the definition of an
+element-wise exponentiation of two vectors (and even matrices), provided
+that they have the same dimensions. The second one allows the definition
+of the exponentiation of a vector/matrix by a scalar (components are
+raised to the same unique power). For instance:
+
+.. math:: im1 ~ pow ~ im2
+
+.. math:: im1 ~ pw ~ 2.0
+
+**Function bands** This function allows selecting specific bands from an
+image, and/or rearranging them into a new vector; for instance:
+.. math:: bands(im1,\{1,2,1,1\})
+
+produces a vector of 4 components made of band 1, band 2, band 1 and
+band 1 values from the first input. Note that curly brackets must be
+used in order to select the desired band indices.
+
+**Function dotpr** This function computes the dot product between two
+vectors or matrices (actually, in our case, a kernel and a neighbourhood
+of pixels):
+
+.. math:: \sum_{(i,j)} m_1(i,j)*m_2(i,j)
+
+For instance:
+
+.. math:: dotpr(kernel1,im1b1N3x5)
+
+is correct provided that kernel1 and im1b1N3x5 have the same dimensions.
+The function can take as many neighbourhoods as needed in inputs.
+
+**Function mean** This function computes the mean value of a
+given vector or neighborhood (the function can take as many inputs as
+needed; one mean value is computed per input). For instance:
+
+.. math:: mean(im1b1N3x3,im1b2N3x3,im1b3N3x3,im1b4N3x3)
+
+Note: a limitation coming from muParserX itself makes it impossible to
+pass all those neighborhoods in a single variable.
+
+**Function var** This function computes the variance of a given
+vector or neighborhood (the function can take as many inputs as needed;
+one variance value is computed per input). For instance:
+
+.. math:: var(im1b1N3x3)
+
+**Function median** This function computes the median value of
+a given vector or neighborhood (the function can take as many inputs as
+needed; one median value is computed per input). For instance:
+
+.. math:: median(im1b1N3x3)
+
+**Function corr** This function computes the correlation
+between two vectors or matrices of the same dimensions (the function
+takes two inputs). For instance:
+
+.. math:: corr(im1b1N3x3,im1b2N3x3)
+
+**Function maj** This function computes the most represented
+element within a vector or a matrix (the function can take as many
+inputs as needed; one majority element is computed per input). For
+instance:
+
+.. math:: maj(im1b1N3x3,im1b2N3x3)
+
+**Function vmin and vmax** These functions compute the min or
+max value of a given vector or neighborhood (only one input). For
+instance:
+
+.. math:: (vmax(im3b1N3x5)+vmin(im3b1N3x5)) ~ div ~ \{2.0\}
+
+**Function cat** This function concatenates the results of
+several expressions into a multidimensional vector, whatever their
+respective dimensions (the function can take as many inputs as needed).
+For instance:
+
+.. math:: cat(im3b1,vmin(im3b1N3x5),median(im3b1N3x5),vmax(im3b1N3x5))
+
+Note: the user should prefer the use of semi-colons (;) when setting
+expressions, instead of using this function directly. The application
+will call the ’cat’ function automatically.
+
+**Function ndvi** This function implements the classical normalized
+difference vegetation index; it takes two inputs. For instance:
+
+.. math:: ndvi(im1b1,im1b4)
+
+The first argument is related to the visible red band, and the second
+one to the near-infrared band.
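+
+Assuming the usual definition of this index, with r the value of the
+visible red band and nir the value of the near-infrared band, the
+computed quantity is:
+
+.. math:: ndvi(r, nir) = \frac{nir - r}{nir + r}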
+
+The table below summarises the different functions and operators.
+
+.. list-table::
+   :header-rows: 1
+
+   * - Function / operator
+     - Remark
+   * - ndvi
+     - two inputs
+   * - bands
+     - two inputs; the length of the second vector input gives the
+       dimension of the output
+   * - dotpr
+     - many inputs
+   * - cat
+     - many inputs
+   * - mean
+     - many inputs
+   * - var
+     - many inputs
+   * - median
+     - many inputs
+   * - maj
+     - many inputs
+   * - corr
+     - two inputs
+   * - div and dv
+     - operators
+   * - mult and mlt
+     - operators
+   * - pow and pw
+     - operators
+   * - vnorm, vabs, vmin, vmax, vcos, vsin, vtan, vtanh, vsinh, vcosh,
+       vlog, vlog10, vexp, vsqrt
+     - adaptation of an existing function to vectors : one input
+
+[bandmathOperators]
diff --git a/Documentation/Cookbook/rst/conf.py.in b/Documentation/Cookbook/rst/conf.py.in
new file mode 100644
index 0000000000000000000000000000000000000000..34cf72cb8c304adbb0aa92a7aaaa8af0821e2ef8
--- /dev/null
+++ b/Documentation/Cookbook/rst/conf.py.in
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+#
+# OTB CookBook documentation build configuration file, created by
+# sphinx-quickstart on Wed Dec 10 17:09:42 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.todo',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'OTB CookBook'
+copyright = u'@OTB_COPYRIGHT_TEXT@'
+#copyright = u'2014, OTB Team'
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '@OTB_FULL_VERSION@'
+#version = '5.2.0'
+# The full version, including alpha/beta/rc tags.
+release = '@OTB_FULL_VERSION@'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['@RST_BUILD_DIR@']
+#exclude_patterns = ['_build']
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'OTBCookBookdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'OTBCookBook.tex', u'OTB CookBook Documentation',
+   u'OTB Team', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'otbcookbook', u'OTB CookBook Documentation',
+     [u'OTB Team'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'OTBCookBook', u'OTB CookBook Documentation',
+   u'OTB Team', 'OTBCookBook', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/Documentation/Cookbook/rst/featextract.rst b/Documentation/Cookbook/rst/featextract.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3f92e0280fc26ce80174f6c4c852fb51e56a2b4b
--- /dev/null
+++ b/Documentation/Cookbook/rst/featextract.rst
@@ -0,0 +1,524 @@
+Feature extraction
+==================
+
+As described in the OTB Software Guide, the term *Feature Extraction*
+refers to techniques that extract added-value information from images.
+These extracted items, named *features*, can be local statistical
+moments, edges, radiometric indices, or morphological and textural
+properties. Such features can then be used as input data for other image
+processing methods, for example *Segmentation* and *Classification*.
+
+Local statistics extraction
+---------------------------
+
+This application computes the 4 local statistical moments on every pixel
+in the selected channel of the input image, over a specified
+neighborhood. The output image is multi band with one statistical moment
+(feature) per band. Thus, the 4 output features are the Mean, the
+Variance, the Skewness and the Kurtosis. They are provided in this exact
+order in the output image.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-radius`` the computational window radius (default value is 3
+   pixels)
+
+-  ``-out`` the output image containing the local statistical moments
+
+The application can be used like this:
+
+::
+
+    otbcli_LocalStatisticExtraction  -in        InputImage
+                                     -channel   1
+                                     -radius    3
+                                     -out       OutputImage
+
+Edge extraction
+---------------
+
+This application computes edge features on every pixel in the selected
+channel of the input image.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-filter`` the choice of edge detection method
+   (gradient/sobel/touzi) (default value is gradient)
+
+-  ``(-filter.touzi.xradius)`` the X Radius of the Touzi processing
+   neighborhood (only if filter==touzi) (default value is 1 pixel)
+
+-  ``(-filter.touzi.yradius)`` the Y Radius of the Touzi processing
+   neighborhood (only if filter==touzi) (default value is 1 pixel)
+
+-  ``-out`` the output mono band image containing the edge features
+
+The application can be used like this:
+
+::
+
+    otbcli_EdgeExtraction  -in        InputImage
+                           -channel   1
+                           -filter    sobel
+                           -out       OutputImage
+
+or like this if filter==touzi:
+
+::
+
+    otbcli_EdgeExtraction  -in                    InputImage
+                           -channel               1
+                           -filter                touzi
+                           -filter.touzi.xradius  2
+                           -filter.touzi.yradius  2 
+                           -out                   OutputImage
+
+Radiometric indices extraction
+------------------------------
+
+This application computes radiometric indices using the channels of the
+input image. The output is a multi band image into which each channel is
+one of the selected indices.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-out`` the output image containing the radiometric indices
+
+-  ``-channels.blue`` the Blue channel index in the input image (default
+   value is 1)
+
+-  ``-channels.green`` the Green channel index in the input image
+   (default value is 1)
+
+-  ``-channels.red`` the Red channel index in the input image (default
+   value is 1)
+
+-  ``-channels.nir`` the Near Infrared channel index in the input image
+   (default value is 1)
+
+-  ``-channels.mir`` the Mid-Infrared channel index in the input image
+   (default value is 1)
+
+-  ``-list`` the list of available radiometric indices (default value is
+   Vegetation:NDVI)
+
+The available radiometric indices to be listed into -list with their
+relevant channels in brackets are:
+
+::
+
+    Vegetation:NDVI - Normalized difference vegetation index (Red, NIR)
+    Vegetation:TNDVI - Transformed normalized difference vegetation index (Red, NIR)
+    Vegetation:RVI - Ratio vegetation index (Red, NIR)
+    Vegetation:SAVI - Soil adjusted vegetation index (Red, NIR)
+    Vegetation:TSAVI - Transformed soil adjusted vegetation index (Red, NIR)
+    Vegetation:MSAVI - Modified soil adjusted vegetation index (Red, NIR)
+    Vegetation:MSAVI2 - Modified soil adjusted vegetation index 2 (Red, NIR)
+    Vegetation:GEMI - Global environment monitoring index (Red, NIR)
+    Vegetation:IPVI - Infrared percentage vegetation index (Red, NIR)
+
+    Water:NDWI - Normalized difference water index (Gao 1996) (NIR, MIR)
+    Water:NDWI2 - Normalized difference water index (Mc Feeters 1996) (Green, NIR)
+    Water:MNDWI - Modified normalized difference water index (Xu 2006) (Green, MIR)
+    Water:NDPI - Normalized difference pond index (Lacaux et al.) (MIR, Green)
+    Water:NDTI - Normalized difference turbidity index (Lacaux et al.) (Red, Green)
+
+    Soil:RI - Redness index (Red, Green)
+    Soil:CI - Color index (Red, Green)
+    Soil:BI - Brightness index (Red, Green)
+    Soil:BI2 - Brightness index 2 (NIR, Red, Green)
+
+The application can be used like this, which leads to an output image
+with 3 bands, respectively with the Vegetation:NDVI, Vegetation:RVI and
+Vegetation:IPVI radiometric indices in this exact order:
+
+::
+
+    otbcli_RadiometricIndices -in             InputImage
+                              -out            OutputImage
+                              -channels.red   3
+                              -channels.green 2
+                              -channels.nir   4
+                              -list           Vegetation:NDVI Vegetation:RVI
+                                              Vegetation:IPVI 
+
+or like this, which leads to a single band output image with the
+Water:NDWI2 radiometric index:
+
+::
+
+    otbcli_RadiometricIndices -in             InputImage
+                              -out            OutputImage
+                              -channels.red   3
+                              -channels.green 2
+                              -channels.nir   4
+                              -list           Water:NDWI2 
+
+Morphological features extraction
+---------------------------------
+
+Morphological features can be highlighted by using image filters based
+on mathematical morphology either on binary or gray scale images.
+
+Binary morphological operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application performs binary morphological operations (dilation,
+erosion, opening and closing) on a mono band image with a specific
+structuring element (a ball or a cross) having one radius along X and
+another one along Y. NB: the cross shaped structuring element has a
+fixed radius equal to 1 pixel in both X and Y directions.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to be filtered
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-structype`` the choice of the structuring element type
+   (ball/cross) (default value is ball)
+
+-  ``(-structype.ball.xradius)`` the ball structuring element X Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``(-structype.ball.yradius)`` the ball structuring element Y Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``-filter`` the choice of the morphological operation
+   (dilate/erode/opening/closing) (default value is dilate)
+
+-  ``(-filter.dilate.foreval)`` the foreground value for the dilation
+   (idem for filter.erode/opening/closing) (default value is 1)
+
+-  ``(-filter.dilate.backval)`` the background value for the dilation
+   (idem for filter.erode/opening/closing) (default value is 0)
+
+-  ``-out`` the output filtered image
+
+The application can be used like this:
+
+::
+
+    otbcli_BinaryMorphologicalOperation  -in                     InputImage
+                                         -channel                1
+                                         -structype              ball
+                                         -structype.ball.xradius 10
+                                         -structype.ball.yradius 5
+                                         -filter                 opening
+                                         -filter.opening.foreval 1.0
+                                         -filter.opening.backval 0.0
+                                         -out                    OutputImage
+
+Gray scale morphological operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application performs morphological operations (dilation, erosion,
+opening and closing) on a gray scale mono band image with a specific
+structuring element (a ball or a cross) having one radius along X and
+another one along Y. NB: the cross shaped structuring element has a
+fixed radius equal to 1 pixel in both X and Y directions.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to be filtered
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-structype`` the choice of the structuring element type
+   (ball/cross) (default value is ball)
+
+-  ``(-structype.ball.xradius)`` the ball structuring element X Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``(-structype.ball.yradius)`` the ball structuring element Y Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``-filter`` the choice of the morphological operation
+   (dilate/erode/opening/closing) (default value is dilate)
+
+-  ``-out`` the output filtered image
+
+The application can be used like this:
+
+::
+
+    otbcli_GrayScaleMorphologicalOperation  -in                     InputImage
+                                            -channel                1
+                                            -structype              ball
+                                            -structype.ball.xradius 10
+                                            -structype.ball.yradius 5
+                                            -filter                 opening
+                                            -out                    OutputImage
+
+Textural features extraction
+----------------------------
+
+Texture features can be extracted with the help of image filters based
+on texture analysis methods like Haralick and structural feature set
+(SFS).
+
+Haralick texture features
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application computes Haralick, advanced and higher order texture
+features on every pixel in the selected channel of the input image. The
+output image is multi band with a feature per band.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-texture`` the texture set selection [simple/advanced/higher]
+   (default value is simple)
+
+-  ``-parameters.min`` the input image minimum (default value is 0)
+
+-  ``-parameters.max`` the input image maximum (default value is 255)
+
+-  ``-parameters.xrad`` the X Radius of the processing neighborhood
+   (default value is 2 pixels)
+
+-  ``-parameters.yrad`` the Y Radius of the processing neighborhood
+   (default value is 2 pixels)
+
+-  ``-parameters.xoff`` the :math:`\Delta`\ X Offset for the
+   co-occurrence computation (default value is 1 pixel)
+
+-  ``-parameters.yoff`` the :math:`\Delta`\ Y Offset for the
+   co-occurrence computation (default value is 1 pixel)
+
+-  ``-parameters.nbbin`` the number of bins per axis for histogram
+   generation (default value is 8)
+
+-  ``-out`` the output multi band image containing the selected texture
+   features (one feature per band)
+
+The available values for -texture with their relevant features are:
+
+-  ``-texture=simple:`` In this case, 8 local Haralick textures features
+   will be processed. The 8 output image channels are: Energy, Entropy,
+   Correlation, Inverse Difference Moment, Inertia, Cluster Shade,
+   Cluster Prominence and Haralick Correlation. They are provided in
+   this exact order in the output image. Thus, this application computes
+   the following Haralick textures over a neighborhood with user defined
+   radius. To improve the speed of computation, a variant of the Grey
+   Level Co-occurrence Matrix (GLCM) called Grey Level Co-occurrence
+   Indexed List (GLCIL) is used. Given below is the mathematical
+   explanation of the computation of each texture. Each element of the
+   GLCIL is a pair made of the cell index (i, j) of a pixel in the
+   neighborhood window (taken with the given offset) and its frequency;
+   :math:` g(i, j) ` is the frequency value of the pair whose index is
+   (i, j).
+
+   “Energy” :math:` = f_1 = \sum_{i, j}g(i, j)^2 `
+
+   “Entropy” :math:` = f_2 = -\sum_{i, j}g(i, j) \log_2 g(i, j)`, or 0
+   if :math:`g(i, j) = 0`
+
+   “Correlation”
+   :math:` = f_3 = \sum_{i, j}\frac{(i - \mu)(j - \mu)g(i, j)}{\sigma^2} `
+
+   “Inverse Difference Moment”
+   :math:`= f_4 = \sum_{i, j}\frac{1}{1 + (i - j)^2}g(i, j) `
+
+   “Inertia” :math:` = f_5 = \sum_{i, j}(i - j)^2g(i, j) ` (sometimes
+   called “contrast”)
+
+   “Cluster Shade”
+   :math:` = f_6 = \sum_{i, j}((i - \mu) + (j - \mu))^3 g(i, j) `
+
+   “Cluster Prominence”
+   :math:` = f_7 = \sum_{i, j}((i - \mu) + (j - \mu))^4 g(i, j) `
+
+   “Haralick’s Correlation”
+   :math:` = f_8 = \frac{\sum_{i, j}(i, j) g(i, j) -\mu_t^2}{\sigma_t^2} `
+   where :math:`\mu_t` and :math:`\sigma_t` are the mean and standard
+   deviation of the row (or column, due to symmetry) sums. Above,
+   :math:` \mu = ` (weighted pixel average)
+   :math:` = \sum_{i, j}i \cdot g(i, j) = \sum_{i, j}j \cdot g(i, j) `
+   (due to matrix symmetry), and :math:` \sigma = ` (weighted pixel
+   variance)
+   :math:` = \sum_{i, j}(i - \mu)^2 \cdot g(i, j) = \sum_{i, j}(j - \mu)^2 \cdot g(i, j) `
+   (due to matrix symmetry).
+
+-  ``-texture=advanced:`` In this case, 10 advanced texture features
+   will be processed. The 10 output image channels are: Mean, Variance,
+   Dissimilarity, Sum Average, Sum Variance, Sum Entropy, Difference of
+   Entropies, Difference of Variances, IC1 and IC2. They are provided in
+   this exact order in the output image. The textures are computed over
+   a sliding window with user defined radius.
+
+   To improve the speed of computation, a variant of the Grey Level
+   Co-occurrence Matrix (GLCM) called Grey Level Co-occurrence Indexed
+   List (GLCIL) is used. Given below is the mathematical explanation of
+   the computation of each texture. Each element of the GLCIL is a pair
+   made of the cell index (i, j) of a pixel in the neighborhood window
+   (taken with the given offset) and its frequency; :math:` g(i, j) ` is
+   the frequency value of the pair whose index is (i, j).
+
+   “Mean” :math:` = \sum_{i, j}i g(i, j) `
+
+   “Sum of squares: Variance”
+   :math:` = f_4 = \sum_{i, j}(i - \mu)^2 g(i, j) `
+
+   “Dissimilarity” :math:` = f_5 = \sum_{i, j}(i - j) g(i, j)^2 `
+
+   “Sum average” :math:` = f_6 = -\sum_{i}i g_{x+y}(i) `
+
+   “Sum Variance” :math:` = f_7 = \sum_{i}(i - f_8)^2 g_{x+y}(i) `
+
+   “Sum Entropy” :math:`= f_8 = -\sum_{i}g_{x+y}(i) \log (g_{x+y}(i)) `
+
+   “Difference variance” :math:` = f_{10} = ` variance of :math:` g_{x-y}(i) `
+
+   “Difference entropy”
+   :math:` = f_{11} = -\sum_{i}g_{x-y}(i) \log (g_{x-y}(i)) `
+
+   “Information Measures of Correlation IC1”
+   :math:` = f_{12} = \frac{f_9 - HXY1}{H} `
+
+   “Information Measures of Correlation IC2”
+   :math:` = f_{13} = \sqrt{1 - \exp(-2|HXY2 - f_9|)} `
+
+   Above, :math:` \mu = ` (weighted pixel average)
+   :math:` = \sum_{i, j}i \cdot g(i, j) = \sum_{i, j}j \cdot g(i, j) `
+   (due to matrix symmetry), and
+
+   :math:` g_{x+y}(k) =  \sum_{i}\sum_{j}g(i) ` where :math:` i+j=k `
+   and :math:` k = 2, 3, .., 2N_{g} ` and
+
+   :math:` g_{x-y}(k) =  \sum_{i}\sum_{j}g(i) ` where :math:` i-j=k `
+   and :math:` k = 0, 1, .., N_{g}-1 `
+
+-  ``-texture=higher:`` In this case, 11 local higher order statistics
+   texture coefficients based on the grey level run-length matrix will
+   be processed. The 11 output image channels are: Short Run Emphasis,
+   Long Run Emphasis, Grey-Level Nonuniformity, Run Length
+   Nonuniformity, Run Percentage, Low Grey-Level Run Emphasis, High
+   Grey-Level Run Emphasis, Short Run Low Grey-Level Emphasis, Short Run
+   High Grey-Level Emphasis, Long Run Low Grey-Level Emphasis and Long
+   Run High Grey-Level Emphasis. They are provided in this exact order
+   in the output image. Thus, this application computes the following
+   Haralick textures over a sliding window with user defined radius:
+   (where :math:` p(i, j) ` is the element in cell i, j of a normalized
+   Run Length Matrix, :math:`n_r` is the total number of runs and
+   :math:`n_p` is the total number of pixels):
+
+   “Short Run Emphasis”
+   :math:` = SRE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{j^2} `
+
+   “Long Run Emphasis”
+   :math:` = LRE = \frac{1}{n_r} \sum_{i, j}p(i, j) * j^2 `
+
+   “Grey-Level Nonuniformity”
+   :math:` = GLN = \frac{1}{n_r} \sum_{i} \left( \sum_{j}{p(i, j)} \right)^2 `
+
+   “Run Length Nonuniformity”
+   :math:` = RLN = \frac{1}{n_r} \sum_{j} \left( \sum_{i}{p(i, j)} \right)^2 `
+
+   “Run Percentage” :math:` = RP = \frac{n_r}{n_p} `
+
+   “Low Grey-Level Run Emphasis”
+   :math:` = LGRE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{i^2} `
+
+   “High Grey-Level Run Emphasis”
+   :math:` = HGRE = \frac{1}{n_r} \sum_{i, j}p(i, j) * i^2 `
+
+   “Short Run Low Grey-Level Emphasis”
+   :math:` = SRLGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{i^2 j^2} `
+
+   “Short Run High Grey-Level Emphasis”
+   :math:` = SRHGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j) * i^2}{j^2} `
+
+   “Long Run Low Grey-Level Emphasis”
+   :math:` = LRLGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j) * j^2}{i^2} `
+
+   “Long Run High Grey-Level Emphasis”
+   :math:` = LRHGE = \frac{1}{n_r} \sum_{i, j} p(i, j) i^2 j^2 `
+
+The application can be used like this:
+
+::
+
+    otbcli_HaralickTextureExtraction  -in             InputImage
+                                      -channel        1
+                                      -texture        simple
+                                      -parameters.min 0
+                                      -parameters.max 255
+                                      -out            OutputImage
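+
+The advanced and higher order texture sets are selected in the same way.
+For instance, the following command (an indicative sketch, with
+arbitrary parameter values) computes the advanced texture set over a
+larger neighborhood and with a finer histogram quantization:
+
+::
+
+    otbcli_HaralickTextureExtraction  -in               InputImage
+                                      -channel          1
+                                      -texture          advanced
+                                      -parameters.min   0
+                                      -parameters.max   255
+                                      -parameters.xrad  3
+                                      -parameters.yrad  3
+                                      -parameters.nbbin 16
+                                      -out              OutputImage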
+
+SFS texture extraction
+~~~~~~~~~~~~~~~~~~~~~~
+
+This application computes Structural Feature Set textures on every pixel
+in the selected channel of the input image. The output image is multi
+band with a feature per band. The 6 output texture features are
+SFS’Length, SFS’Width, SFS’PSI, SFS’W-Mean, SFS’Ratio and SFS’SD. They
+are provided in this exact order in the output image.
+
+It is based on line direction estimation and is described in the
+following publication: Xin Huang, Liangpei Zhang and Pingxiang Li,
+*Classification and Extraction of Spatial Features in Urban Areas Using
+High-Resolution Multispectral Imagery*, IEEE Geoscience and Remote
+Sensing Letters, vol. 4, no. 2, 2007, pp. 260-264.
+
+The texture is computed for each pixel using its neighborhood. The user
+can set the spatial threshold, which is the maximum line length, and the
+spectral threshold, which is the maximum difference allowed between a
+pixel of the line and the center pixel of the current neighborhood. The
+adjustment constant alpha and the ratio Maximum Consideration Number,
+which describes the shape contour around the central pixel, are used to
+compute the :math:`w - mean` value.
+
+The application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-parameters.spethre`` the spectral threshold (default value is 50)
+
+-  ``-parameters.spathre`` the spatial threshold (default value is 100
+   pixels)
+
+-  ``-parameters.nbdir`` the number of directions (default value is 20)
+
+-  ``-parameters.alpha`` the alpha value (default value is 1)
+
+-  ``-parameters.maxcons`` the ratio Maximum Consideration Number
+   (default value is 5)
+
+-  ``-out`` the output multi band image containing the selected texture
+   features (one feature per band)
+
+The application can be used like this:
+
+::
+
+    otbcli_SFSTextureExtraction -in             InputImage
+                                -channel        1
+                                -out            OutputImage
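+
+The thresholds, number of directions and other parameters described
+above can also be set explicitly. The following variant (an indicative
+sketch using the documented parameters with arbitrary values) tightens
+the spectral threshold and increases the number of explored directions:
+
+::
+
+    otbcli_SFSTextureExtraction -in                 InputImage
+                                -channel            1
+                                -parameters.spethre 30
+                                -parameters.spathre 100
+                                -parameters.nbdir   30
+                                -out                OutputImage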
+
diff --git a/Documentation/Cookbook/rst/improc.rst b/Documentation/Cookbook/rst/improc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7265657371aaf0b4d088b3743ba8fd348eb8717a
--- /dev/null
+++ b/Documentation/Cookbook/rst/improc.rst
@@ -0,0 +1,380 @@
+Image processing and information extraction
+===========================================
+
+Simple calculus with channels
+-----------------------------
+
+The BandMath application provides a simple and efficient way to perform
+band operations. The command line application and the corresponding
+Monteverdi module (the BandMath module) are based on the same standards.
+It computes a band-wise operation according to a user-defined
+mathematical expression. The following code computes the absolute
+difference between the first bands of two images:
+
+::
+
+    otbcli_BandMath -il input_image_1 input_image_2
+                    -exp "abs(im1b1 - im2b1)"
+                    -out output_image
+
+The naming convention “im[x]b[y]” designates the yth band of the xth
+input image.
+
+The application embeds built-in operators and functions (listed
+`here <http://muparser.sourceforge.net/mup_features.html#idDef2>`__),
+allowing a vast choice of possible operations.
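+
+For instance, using the same naming convention, a normalized difference
+between two bands of a single image (here, hypothetically, bands 4 and 3
+of the first input; the small constant avoids a division by zero) can be
+written as:
+
+::
+
+    otbcli_BandMath -il  input_image
+                    -exp "(im1b4 - im1b3) / (im1b4 + im1b3 + 0.0001)"
+                    -out output_image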
+
+Images with no-data values
+--------------------------
+
+Image files can contain a no-data value in their metadata. It represents
+a special pixel value that should be treated as “no data available for
+this pixel”. For instance, SRTM tiles use a particular no-data value of
+-32768 (usually found on sea areas).
+
+On multiband images, the no-data values are handled independently for
+each band. The case of an image with no-data values defined only for a
+subset of its bands is supported.
+
+This metadata is now handled by OTB image readers and writers (using the
+GDAL driver). The no-data value can be read from an image file and
+stored in the image metadata dictionary. It can also be exported by
+image writers. The OTB filters that produce a no-data value are able to
+export this value so that the output file will store it.
+
+The ManageNoData application has been created to manage the no-data
+value. It has the following features:
+
+-  Build a mask corresponding to the no-data pixels in the input image:
+   it gives you a binary image of the no-data pixels in your input
+   image.
+
+-  Change the no-data value of the input image: it will change all
+   pixels that carry the old no-data value to the new one and update the
+   metadata.
+
+-  Apply an external mask to the input image as no-data: all the pixels
+   that correspond to a null mask value are flagged as no-data in the
+   output image.
+
+For instance, the following command converts the no-data value of the
+input image to the default value for DEM (which is -32768):
+
+::
+
+    otbcli_ManageNoData -in input_image.tif
+                        -out output_image.tif
+                        -mode changevalue
+                        -mode.changevalue.newv -32768
+
+The third mode “apply” can be useful if you apply a formula to the
+entire image. This will likely change the values of pixels flagged as
+no-data, but the no-data value in the image metadata doesn’t change. If
+you want to fix all no-data pixels to their original value, you can
+extract the mask of the original image and apply it on the output image.
+For instance:
+
+::
+
+    otbcli_ManageNoData -in input_image.tif
+                        -out mask.tif
+                        -mode buildmask
+
+    otbcli_BandMath -il input_image.tif
+                    -out filtered_image.tif
+                    -exp "2*im1b1-4"
+
+    otbcli_ManageNoData -in filtered_image.tif
+                        -out output_image.tif
+                        -mode apply
+                        -mode.apply.mask mask.tif
+
+Segmentation
+------------
+
+Segmenting objects across a very high resolution scene and with a
+controlled quality is a difficult task for which no method has reached a
+sufficient level of performance to be considered as operational.
+
+Even if we leave aside the question of segmentation quality and consider
+that we have a method performing reasonably well on our data and objects
+of interest, the task of scaling up segmentation to real very high
+resolution data is itself challenging. First, we cannot load the whole
+data into memory, and there is a need for on-the-fly processing, which
+does not cope well with traditional segmentation algorithms. Second, the
+result of the segmentation process itself is difficult to represent and
+manipulate efficiently.
+
+The experience gained in segmenting large remote sensing images is
+packed into a single dedicated application.
+
+You can find more information about this application
+`here <http://blog.orfeo-toolbox.org/preview/coming-next-large-scale-segmentation>`__.
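+
+As an indicative sketch (the exact parameter names should be checked
+against the application's documentation), a typical invocation of the
+segmentation application producing a vector output looks like this:
+
+::
+
+    otbcli_Segmentation -in     input_image
+                        -filter meanshift
+                        -mode   vector
+                        -mode.vector.out segmentation.shp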
+
+Large-Scale Mean-Shift (LSMS) segmentation
+------------------------------------------
+
+LSMS is a segmentation workflow that allows performing tile-wise
+segmentation of very large images, with the theoretical guarantee of
+getting results identical to those obtained without tiling.
+
+It has been developed by David Youssefi and Julien Michel during David
+Youssefi's internship at CNES.
+
+For a more complete description of the LSMS method, please refer to the
+following publication, *J. Michel, D. Youssefi and M. Grizonnet, “Stable
+Mean-Shift Algorithm and Its Application to the Segmentation of
+Arbitrarily Large Remote Sensing Images,” in IEEE Transactions on
+Geoscience and Remote Sensing, vol. 53, no. 2, pp. 952-964, Feb. 2015.*
+
+The workflow consists in chaining 3 or 4 dedicated applications and
+produces a GIS vector file with artifact-free polygons corresponding to
+the segmented image, as well as mean and variance of the radiometry of
+each band for each polygon.
+
+Step 1: Mean-Shift Smoothing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The first step of the workflow is to perform Mean-Shift smoothing with
+the application:
+
+::
+
+    otbcli_MeanShiftSmoothing -in input_image 
+                              -fout filtered_range.tif 
+                              -foutpos filtered_spat.tif 
+                              -ranger 30 
+                              -spatialr 5 
+                              -maxiter 10 
+                              -modesearch 0
+
+Note that the *modesearch* option should be disabled, and that the
+*foutpos* parameter is optional: it can be activated if you want to
+perform the segmentation based on both spatial and range modes.
+
+This application will smooth large images by streaming them, and
+deactivating the *modesearch* will guarantee that the results will not
+depend on the streaming scheme. Please also note that the *maxiter* is
+used to set the margin to ensure these identical results, and as such
+increasing the *maxiter* may have an additional impact on processing
+time.
+
+Step 2: Segmentation
+~~~~~~~~~~~~~~~~~~~~
+
+The next step is to produce an initial segmentation based on the
+smoothed images produced by the previous application. To do so, the
+segmentation application will process them by tiles whose dimensions are
+defined by the *tilesizex* and *tilesizey* parameters, and will write
+intermediate images to disk, thus keeping the memory consumption very
+low throughout the process. The segmentation will group together
+adjacent pixels whose range distance is below the *ranger* parameter and
+(optionally) whose spatial distance is below the *spatialr* parameter.
+
+::
+
+    otbcli_LSMSSegmentation -in filtered_range.tif
+                            -inpos filtered_spatial.tif
+                            -out  segmentation.tif uint32 
+                            -ranger 30 
+                            -spatialr 5 
+                            -minsize 0 
+                            -tilesizex 256 
+                            -tilesizey 256
+
+Note that the final segmentation image may contain a very large number
+of segments, and the *uint32* image type should therefore be used to
+ensure that there will be enough labels to index those segments. The
+*minsize* parameter will filter segments whose size in pixels is below
+its value, and their labels will be set to 0 (nodata).
+
+Please note that the output segmented image may look patchy, as if there
+were tiling artifacts: this is because segments are numbered
+sequentially with respect to the order in which tiles are processed. You
+will see, after the vectorization step, that there are no artifacts in
+the results.
+
+The application will write as many intermediate files as there are tiles
+to process. As such, it may require twice as much free disk space as the
+size of the final image. The *cleanup* option (active by default) will
+clear the intermediate files during the processing as soon as they are
+not needed anymore. By default, files will be written to the current
+directory. The *tmpdir* option allows specifying a different directory
+for these intermediate files.
+
+Step 3 (optional): Merging small regions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The segmentation application allows filtering out small segments. In the
+output segmented image, those segments will be removed and replaced by
+the background label (0). Another solution to deal with the small
+regions is to merge them with the closest big enough adjacent region in
+terms of radiometry. This is handled by the small regions merging
+application, which will output a segmented image where small regions
+have been merged. Again, the *uint32* image type is advised for this
+output image.
+
+::
+
+    otbcli_LSMSSmallRegionsMerging -in filtered_range.tif
+                                   -inseg segmentation.tif
+                                   -out segmentation_merged.tif uint32 
+                                   -minsize 10 
+                                   -tilesizex 256 
+                                   -tilesizey 256
+
+The *minsize* parameter allows specifying the threshold on the size of
+the regions to be merged. Like the segmentation application, this
+application will process the input images tile-wise to keep resource
+usage low, with the guarantee of identical results. You can set the tile
+size using the *tilesizex* and *tilesizey* parameters. However, unlike
+the segmentation application, it does not require writing any temporary
+file to disk.
+
+Step 4: Vectorization
+~~~~~~~~~~~~~~~~~~~~~
+
+The last step of the LSMS workflow consists in the vectorization of the
+segmented image into a GIS vector file. This vector file will contain
+one polygon per segment, and each of these polygons will hold additional
+attributes denoting the label of the original segment, the size of the
+segment in pixels, and the mean and variance of each band over the
+segment. The projection of the output GIS vector file will be the same
+as the projection of the input image (if the input image has no
+projection, neither does the output GIS file).
+
+::
+
+    otbcli_LSMSVectorization -in input_image 
+                             -inseg segmentation_merged.tif 
+                             -out segmentation_merged.shp 
+                             -tilesizex 256 
+                             -tilesizey 256
+
+This application will process the input images tile-wise to keep
+resource usage low, with the guarantee of identical results. You can
+set the tile size using the *tilesizex* and *tilesizey* parameters.
+However, unlike the segmentation application, it does not require
+writing any temporary file to disk.
+
+Dempster Shafer based Classifier Fusion
+---------------------------------------
+
+This framework performs cartographic validation starting from the result
+of a detection (for example a road extraction) and enhances the
+reliability of the results by using a classifier fusion algorithm. Using
+a set of descriptors, the processing chain validates or invalidates the
+input geometrical features.
+
+Fuzzy Model (requisite)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The DSFuzzyModelEstimation application performs the fuzzy model
+estimation (once per use case: descriptor set / Belief support /
+Plausibility support). It has the following input parameters:
+
+-  ``-psin`` a vector data of positive samples enriched according to the
+   “Compute Descriptors” part
+
+-  ``-nsin`` a vector data of negative samples enriched according to the
+   “Compute Descriptors” part
+
+-  ``-belsup`` a support for the Belief computation
+
+-  ``-plasup`` a support for the Plausibility computation
+
+-  ``-desclist`` an initialization model (xml file) or a descriptor name
+   list (listing the descriptors to be included in the model)
+
+The application can be used like this:
+
+::
+
+    otbcli_DSFuzzyModelEstimation -psin     PosSamples.shp
+                                  -nsin     NegSamples.shp
+                                  -belsup   "ROADSA"
+                                  -plasup   "NONDVI" "ROADSA" "NOBUIL"
+                                  -desclist "NONDVI" "ROADSA" "NOBUIL"
+                                  -out      FuzzyModel.xml
+
+The output file ``FuzzyModel.xml`` contains the optimal model to perform
+information fusion.
+
+First Step: Compute Descriptors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The first step in the classifier fusion based validation is to compute,
+for each studied polyline, the chosen descriptors. In this context, the
+ComputePolylineFeatureFromImage application can be used for a large
+range of descriptors. It has the following inputs:
+
+-  ``-in`` an image (of the studied scene) corresponding to the chosen
+   descriptor (NDVI, building mask…)
+
+-  ``-vd`` a vector data containing polyline of interest
+
+-  ``-expr`` a formula (“b1 >0.4”, “b1 == 0”) where b1 is the standard
+   name of input image first band
+
+-  ``-field`` a field name corresponding to the descriptor codename
+   (NONDVI, ROADSA...)
+
+The output is a vector data containing polylines with a new field
+containing the descriptor value. For instance, the “NONDVI” descriptor,
+which corresponds to the percentage of pixels along a polyline that
+satisfy the formula “NDVI > 0.4”, can be added to an input vector data
+(“inVD.shp”) as follows:
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   NDVI.TIF
+                                           -vd  inVD.shp
+                                           -expr  "b1 > 0.4"
+                                           -field "NONDVI"
+                                           -out   VD_NONDVI.shp
+
+``NDVI.TIF`` is the NDVI mono band image of the studied scene. This step
+must be repeated for each chosen descriptor:
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   roadSpectralAngle.TIF
+                                           -vd  VD_NONDVI.shp
+                                           -expr  "b1 > 0.24"
+                                           -field "ROADSA"
+                                           -out   VD_NONDVI_ROADSA.shp
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   Buildings.TIF
+                                           -vd  VD_NONDVI_ROADSA.shp
+                                           -expr  "b1 == 0"
+                                           -field "NOBUILDING"
+                                           -out   VD_NONDVI_ROADSA_NOBUIL.shp
+
+Both ``NDVI.TIF`` and ``roadSpectralAngle.TIF`` can be produced using
+the feature extraction capabilities, and ``Buildings.TIF`` can be
+generated using the rasterization module. From now on,
+``VD_NONDVI_ROADSA_NOBUIL.shp`` contains three descriptor fields. It
+will be used in the following part.
+
+Second Step: Feature Validation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The final application (VectorDataDSValidation) will validate or
+invalidate the studied samples using `the Dempster-Shafer
+theory <http://en.wikipedia.org/wiki/Dempster%E2%80%93Shafer_theory>`__.
+Its inputs are:
+
+-  ``-in`` an enriched vector data “VD\_NONDVI\_ROADSA\_NOBUIL.shp”
+
+-  ``-belsup`` a support for the Belief computation
+
+-  ``-plasup`` a support for the Plausibility computation
+
+-  ``-descmod`` a fuzzy model FuzzyModel.xml
+
+The output is a vector data containing only the validated samples.
+
+::
+
+    otbcli_VectorDataDSValidation -in      extractedRoads_enriched.shp
+                                  -descmod FuzzyModel.xml
+                                  -out     validatedSamples.shp
+
diff --git a/Documentation/Cookbook/rst/index.rst b/Documentation/Cookbook/rst/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4459a0c85319222623361758396a2e9bf69b9e91
--- /dev/null
+++ b/Documentation/Cookbook/rst/index.rst
@@ -0,0 +1,27 @@
+.. OTB documentation master file, created by
+   sphinx-quickstart on Thu Jul  9 11:22:08 2015
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to OTB CookBook's documentation!
+========================================
+
+.. toctree::
+   :maxdepth: 3
+
+   OTB-Applications
+   Monteverdi
+   Monteverdi2
+   Recipes
+   Applications
+
+   
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/Documentation/Cookbook/rst/optpreproc.rst b/Documentation/Cookbook/rst/optpreproc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1e595c8f556a53b5409b0cada132e6c63de86496
--- /dev/null
+++ b/Documentation/Cookbook/rst/optpreproc.rst
@@ -0,0 +1,767 @@
+From raw image to calibrated product
+====================================
+
+This section presents various pre-processing tasks that are presented in
+a classical order to obtain a calibrated, pan-sharpened image.
+
+Optical radiometric calibration
+-------------------------------
+
+In remote sensing imagery, pixel values are called DN (for Digital
+Numbers) and cannot be physically interpreted and compared: they are
+influenced by various factors such as the amount of light flowing
+through the sensor, the gain of the detectors and the analog-to-digital
+converter.
+
+Depending on the season, the light and atmospheric conditions, the
+position of the sun or the sensor internal parameters, these DN can
+drastically change for a given pixel (apart from any ground change
+effects). Moreover, these effects are not uniform over the spectrum: for
+instance aerosol amount and type has usually more impact on the blue
+channel.
+
+Therefore, it is necessary to calibrate the pixel values before any
+physical interpretation is made out of them. In particular, this
+processing is mandatory before any comparison of pixel spectra between
+several images (from the same sensor), and to train a classifier that
+does not depend on the atmospheric conditions at the acquisition time.
+
+Calibrated values are called surface reflectivity, which is a ratio
+denoting the fraction of light that is reflected by the underlying
+surface in the given spectral range. As such, its values lie in the
+range :math:`[0,1]`. For convenience, images are often stored in
+thousandth of reflectivity, so that they can be encoded with an integer
+type. Two levels of calibration are usually distinguished:
+
+-  The first level is called *Top Of Atmosphere (TOA)* reflectivity. It
+   takes into account the sensor gain, sensor spectral response and the
+   solar illumination.
+
+-  The second level is called *Top Of Canopy (TOC)* reflectivity. In
+   addition to sensor gain and solar illumination, it takes into account
+   the optical thickness of the atmosphere, the atmospheric pressure,
+   the water vapor amount, the ozone amount, as well as the composition
+   and amount of aerosols.
+
+This transformation can be done either with the OpticalCalibration
+application or with the corresponding Monteverdi module. Sensor-related
+parameters such as gain, date, spectral sensitivity and sensor position
+are seamlessly read from the image metadata. Atmospheric parameters can
+be tuned by the user. Supported sensors are:
+
+-  Pleiades,
+
+-  SPOT5,
+
+-  QuickBird,
+
+-  Ikonos,
+
+-  WorldView-1,
+
+-  WorldView-2,
+
+-  Formosat.
+
+Optical calibration with the OpticalCalibration application
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application performs optical calibration. The mandatory parameters
+are the input and output images. All other parameters are optional. By
+default the level of calibration is set to TOA (Top Of Atmosphere). The
+output images are expressed in thousandths of reflectivity using a
+16-bit unsigned integer type.
+
+A basic TOA calibration task can be performed with the following
+command:
+
+::
+
+    otbcli_OpticalCalibration -in  input_image -out output_image
+
+A basic TOC calibration task can be performed with the following
+command:
+
+::
+
+    otbcli_OpticalCalibration -in  input_image -out output_image -level toc
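+
+Since the output is by default coded in thousandths of reflectivity, a
+given band can be brought back to the :math:`[0,1]` range afterwards,
+for instance with the BandMath application (a sketch for the first band
+only; the output type is forced to float to keep decimal values):
+
+::
+
+    otbcli_BandMath -il  output_image
+                    -exp "im1b1 / 1000"
+                    -out reflectance_band1.tif float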
+
+Optical calibration with Monteverdi
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These transformations can also be done in Monteverdi.
+
+The 6S model needs atmospheric parameters to be able to compute
+radiative terms that estimate the atmospheric contributions to the input
+signal. Default parameters are available in the module. For atmospheric
+parameters, it is possible to provide an AERONET file. The AERONET
+(AErosol RObotic NETwork) program is a federation of ground-based remote
+sensing aerosol networks established by NASA and PHOTONS (Univ. of Lille
+1, CNES, and CNRS-INSU) and is greatly expanded by collaborators from
+national agencies, institutes, universities, individual scientists, and
+partners. The program provides an accessible public domain database of
+aerosol optical, microphysical and radiative properties.
+
+The module produces four outputs:
+
+-  Luminance image.
+
+-  TOA reflectance image.
+
+-  TOC reflectance image.
+
+-  Difference TOA-TOC image, which gives an estimation of the
+   atmospheric contribution.
+
+.. Figure placeholder: optical calibration module (fig:opticalcalibration)
+
+.. Figure placeholder: optical calibration outputs (fig:opticalcalibrationoutput)
+
+Pan-sharpening
+--------------
+
+Because of physical constraints on the sensor design, it is difficult to
+achieve high spatial and spectral resolution at the same time: a better
+spatial resolution means a smaller detector, which in turn means less
+optical flow on the detector surface. On the contrary, spectral bands
+are obtained through filters applied on the detector surface, which
+lower the optical flow, so that it is necessary to increase the detector
+size to achieve an acceptable signal-to-noise ratio.
+
+For these reasons, many high resolution satellite payloads are composed
+of two sets of detectors, which in turn deliver two different kinds of
+images:
+
+-  The multi-spectral (XS) image, composed of 3 to 8 spectral bands
+   containing usually blue, green, red and near infra-red bands at a
+   given resolution (usually from 2.8 meters to 2 meters).
+
+-  The panchromatic (PAN) image, which is a grayscale image acquired by
+   a detector covering a wider part of the light spectrum, which
+   increases the optical flow and thus allows reducing the pixel size.
+   Therefore, the pixel size of the panchromatic image is usually around
+   4 times smaller than that of the multi-spectral image (from 46
+   centimeters to 70 centimeters).
+
+It is very frequent that those two images are delivered side by side by
+data providers. Such a dataset is called a bundle. A very common remote
+sensing processing is to fuse the panchromatic image with the
+multi-spectral one so as to get an image combining the spatial
+resolution of the panchromatic image with the spectral richness of the
+multi-spectral image. This operation is called pan-sharpening.
+
+This fusion operation requires two different steps :
+
+#. The multi-spectral (XS) image is zoomed and registered to the
+   panchromatic image,
+
+#. A pixel-by-pixel fusion operator is applied to the co-registered
+   pixels of the multi-spectral and panchromatic image to obtain the
+   fused pixels.
+
+Using either the dedicated application or the Monteverdi modules, it is
+possible to perform both steps in a row, or to apply a step-by-step
+fusion, as described in the following sections.
+
+Pan-sharpening with the BundleToPerfectSensor application
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application performs both steps in a row. Seamless sensor modelling
+is used to perform zooming and registration of the multi-spectral image
+on the panchromatic image. In the case of a Pléiades bundle, a different
+approach is used: an affine transform is used to zoom the multi-spectral
+image and apply a residual translation. This translation is computed
+based on metadata about the geometric processing of the bundle. This
+zooming and registration of the multi-spectral image over the
+panchromatic image can also be performed by a dedicated application.
+
+After the registration step, a simple pan-sharpening is applied,
+according to the following formula:
+
+.. math:: PXS(i,j) = \frac{PAN(i,j)}{PAN_{smooth}(i,j)} \cdot XS(i,j)
+
+Where :math:`i` and :math:`j` are pixels indices, :math:`PAN` is the
+panchromatic image, :math:`XS` is the multi-spectral image and
+:math:`PAN_{smooth}` is the panchromatic image smoothed with a kernel to
+fit the multi-spectral image scale.
+
+Here is a simple example of how to use the application:
+
+::
+
+    otbcli_BundleToPerfectSensor -inp pan_image -inxs xs_image -out output_image
+
+There are also optional parameters that can be useful for this tool:
+
+-  The ``-elev`` option allows specifying the elevation, either with a
+   DEM formatted for OTB (``-elev.dem`` option, see section [ssec:dem])
+   or with an average elevation (``-elev.default`` option). Since
+   registration and zooming of the multi-spectral image is performed
+   using sensor models, it may happen that the registration is not
+   perfect in case of landscapes with high elevation variation. Using a
+   DEM in this case allows getting a better registration.
+
+-  The ``-lmSpacing`` option allows specifying the step of the
+   registration grid between the multi-spectral image and panchromatic
+   image. This is expressed in number of panchromatic pixels. A lower
+   value gives a more precise registration but implies more computation
+   with the sensor models, and thus increases the computation time.
+   Default value is 10 pixels, which gives sufficient precision in most
+   of the cases.
+
+-  The ``-mode`` option allows selecting the registration mode for the
+   multi-spectral image. The ``default`` mode uses the sensor model of
+   each image to create a generic “MS to Pan” transform. The ``phr``
+   mode uses a simple affine transform (which doesn’t need an elevation
+   source nor a registration grid).
+
+Pan-sharpening is a quite heavy processing requiring a lot of system
+resources. The ``-ram`` option allows you to limit the amount of memory
+available for the computation, and to avoid overloading your computer.
+Increasing the available amount of RAM may also result in better
+computation time, since it optimises the use of the system resources.
+Default value is 256 MB.
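+
+Putting some of these options together, a pan-sharpening run using a DEM
+directory and a denser registration grid could look like this (an
+indicative sketch; the values are arbitrary):
+
+::
+
+    otbcli_BundleToPerfectSensor -inp       pan_image
+                                 -inxs      xs_image
+                                 -elev.dem  dem_dir
+                                 -lmSpacing 5
+                                 -ram       1024
+                                 -out       output_image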
+
+Pan-sharpening with Monteverdi
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Monteverdi allows performing a step-by-step fusion. The following
+screenshots highlight the operations needed to perform pan-sharpening.
+
+-  Open the panchromatic and multispectral images in Monteverdi, either
+   through the dedicated module or using the ``-il`` option of the
+   executable.
+
+-  A superimposition module is used to zoom and register the
+   multispectral image on the panchromatic image. As a result, we get a
+   multispectral dataset with the same geographic extent and the same
+   resolution as the panchromatic image, cf [fig:qbmulsuper].
+
+   .. Figure placeholder: superimposed multispectral image (fig:qbmulsuper)
+
+-  Now the pan-sharpening module can be used, taking the panchromatic
+   and the multispectral images as inputs. It produces a multispectral
+   image with the same resolution and geographic extent (cf
+   [fig:pansharpen]).
+
+   .. Figure placeholder: pan-sharpened image (fig:pansharpen)
+
+Please also note that since registration and zooming of the
+multi-spectral image with the panchromatic image rely on sensor
+modelling, this tool will work only for images whose sensor model is
+available in OTB (see section [ssec:ortho] for a detailed list). It will
+also work with ortho-ready products in cartographic projection.
+
+Digital Elevation Model management
+----------------------------------
+
+A Digital Elevation Model (DEM) is a georeferenced image (or collection
+of images) where each pixel corresponds to a local elevation. DEMs are
+useful for tasks involving sensor to ground and ground to sensor
+coordinate transforms, like during ortho-rectification (see
+section [ssec:ortho]). These transforms need to find the intersection
+between the line of sight of the sensor and the earth geoid. If a simple
+spheroid is used as the earth model, potentially high localisation
+errors can be made in areas where elevation is high or perturbed. Of
+course, DEM accuracy and resolution have a great impact on the precision
+of these transforms.
+
+Two main DEMs, free of charge and with worldwide coverage, are
+available; both are delivered as 1 degree by 1 degree tiles:
+
+-  `The Shuttle Radar topographic Mission
+   (SRTM) <http://www2.jpl.nasa.gov/srtm/>`__ is a 90 meters resolution
+   DEM, obtained by radar interferometry during a campaign of the
+   Endeavour space shuttle from NASA in 2000.
+
+-  The `Advanced Spaceborne Thermal Emission and Reflection Radiometer
+   (ASTER) <http://www.ersdac.or.jp/GDEM/E/2.html>`__ is a 30 meters
+   resolution DEM obtained by stereoscopic processing of the archive of
+   the ASTER instrument.
+
+The toolbox relies on dedicated capabilities for sensor modelling and
+DEM handling. Tiles of a given DEM are supposed to be located within a
+single directory. Elevation from general GeoTIFF files is also
+supported.
+
+Whenever an application or module requires a DEM, the option
+**elev.dem** allows setting the DEM directory. This directory must
+contain the DEM tiles, either in DTED or SRTM format, or as GeoTIFF
+files. Subdirectories are not supported.
+
+Depending on the reference of the elevation, you may also need to use a
+geoid to manage elevation accurately. For this, you need to specify the
+path to a file which contains the geoid. The geoid corresponds to the
+equipotential surface that would coincide with the mean ocean surface of
+the Earth (see `Wikipedia <http://en.wikipedia.org/wiki/Geoid>`__). We
+provide one geoid in the OTB-Data repository available
+`here <http://hg.orfeo-toolbox.org/OTB-Data/file/4722d9e672c6/Input/DEM/egm96.grd>`__.
+
+In all applications, the option **elev.geoid** allows setting the path
+to the geoid. Finally, it is also possible to use an average elevation
+in case no DEM is available, by using the **elev.default** option.
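+
+For example, an ortho-rectification call (detailed in the next section)
+using a DEM directory, a geoid file and a fallback average elevation
+could be written as follows (a sketch; the geoid file name is only an
+example):
+
+::
+
+    otbcli_OrthoRectification -io.in        input_image
+                              -io.out       output_image
+                              -elev.dem     dem_dir
+                              -elev.geoid   egm96.grd
+                              -elev.default 50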
+
+Ortho-rectification and map projections
+---------------------------------------
+
+There are several levels of products available on the remote sensing
+imagery market. The most basic level often provides the geometry of
+acquisition (sometimes called the raw geometry). In this case, pixel
+coordinates cannot be directly used as geographical positions. For most
+sensors (but not for all), the different lines correspond to different
+acquisition times and thus different sensor positions, and the different
+columns correspond to different cells of the detector.
+
+The mapping of a raw image so as to be registered to a cartographic grid
+is called ortho-rectification, and consists in inverting the following
+effects (at least):
+
+-  In most cases, lines are orthogonal to the sensor trajectory, which
+   is not exactly (and in some cases not at all) following a north-south
+   axis,
+
+-  Depending on the sensor, the line of sight may be different from a
+   Nadir (ground position of the sensor), and thus a projective warping
+   may appear,
+
+-  The variation of height in the landscape may result in severe warping
+   of the image.
+
+Moreover, depending on the area of the world the image has been acquired
+on, different map projections should be used.
+
+The ortho-rectification process is as follows: once an appropriate map
+projection has been defined, a localisation grid is computed to map
+pixels from the raw image to the ortho-rectified one. Pixels from the
+raw image are then interpolated according to this grid in order to fill
+the ortho-rectified pixels.
+
+Ortho-rectification can be performed either with the OrthoRectification
+application or with the corresponding Monteverdi module. Sensor
+parameters and image meta-data are seamlessly read from the image files
+without needing any user interaction, provided that all auxiliary files
+are available. The sensors for which ortho-rectification of raw products
+is supported are the following:
+
+-  Pleiades,
+
+-  SPOT5,
+
+-  Ikonos,
+
+-  Quickbird,
+
+-  GeoEye,
+
+-  WorldView.
+
+In addition, GeoTIFF and other file formats with geographical
+information are seamlessly read by OTB, and the ortho-rectification
+tools can be used to re-sample these images in another map projection.
+
+Beware of “ortho-ready” products
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are some image products, called “ortho-ready”, that should be
+processed carefully. They are actual products in raw geometry, but their
+metadata also contains projection data:
+
+-  a map projection
+
+-  a physical origin
+
+-  a physical spacing
+
+-  and sometimes an orientation angle
+
+The purpose of this projection information is to give an approximate map
+projection to a raw product. It allows you to display the raw image in a
+GIS viewer at the (almost) right location, without having to reproject
+it. Obviously, this map projection is not as accurate as the sensor
+parameters of the raw geometry. In addition, the impact of the elevation
+model can’t be observed if the map projection is used. In order to
+perform an ortho-rectification on this type of product, the map
+projection has to be hidden from the application.
+
+You can check whether a product is an “ortho-ready” product by using
+tools such as ``gdalinfo`` or the ReadImageInfo application (see
+[app:ReadImageInfo]), and verifying that the product meets the two
+following conditions:
+
+-  The product is in raw geometry: you should expect the presence of
+   RPC coefficients and a non-empty OSSIM keywordlist.
+
+-  The product has a map projection: you should see a projection name
+   with physical origin and spacing.
+
+In that case, you can hide the map projection from the application by
+using *extended* filenames. Instead of using the plain input image path,
+you append a specific key at the end:
+
+::
+
+    "path_to_image?&skipcarto=true"
+
+The double quotes can be necessary for a successful parsing. More
+details about the extended filenames can be found
+`here <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__, and
+also in the OTB Software Guide.
+
+Ortho-rectification with the OrthoRectification application
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application performs ortho-rectification and map re-projection. The
+simplest way to use it is the following command:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image -io.out output_image
+
+In this case, the tool will automatically estimate all the necessary
+parameters:
+
+-  The map projection is set to UTM (a worldwide map projection) and the
+   UTM zone is automatically estimated,
+
+-  The ground sampling distance of the output image is computed to fit
+   the image resolution,
+
+-  The region of interest (upper-left corner and size of the image) is
+   estimated so as to contain the whole input image extent.
+
+In order to use a Digital Elevation Model (see section [ssec:dem]) for
+better localisation performance, one can pass the directory containing
+the DEM tiles to the application:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+
+If one wants to use a different map projection, the *-map* option may be
+used (example with *lambert93* map projection):
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+
+Map projections handled by the application are the following (please
+note that the ellipsoid is always WGS84):
+
+-  | UTM : ``-map utm``
+   | The UTM zone and hemisphere can be set by the options
+   ``-map.utm.zone`` and ``-map.utm.northhem``.
+
+-  Lambert 2 etendu: ``-map lambert2``
+
+-  Lambert 93: ``-map lambert93``
+
+-  | TransMercator: ``-map transmercator``
+   | The related parameters (false easting, false northing and scale
+   factor) can be set by the options
+   ``-map.transmercator.falseeasting``,
+   ``-map.transmercator.falsenorthing`` and ``-map.transmercator.scale``
+
+-  WGS : ``-map wgs``
+
+-  | Any map projection system with an EPSG code : ``-map epsg``
+   | The EPSG code is set with the option ``-map.epsg.code``
+
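+For example, a re-projection using an EPSG code (here 32631, i.e. UTM
+zone 31N on WGS84, chosen purely for illustration) would be written:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -map epsg
+                              -map.epsg.code 32631
+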
+The group ``outputs`` contains parameters to set the origin, size and
+spacing of the output image. For instance, the ground spacing can be
+specified as follows:
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+                              -outputs.spacingx spx
+                              -outputs.spacingy spy
+
+Please note that since the y axis of the image is bottom oriented, the y
+spacing should be negative to avoid switching north and south direction.
+
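+For instance, assuming a (hypothetical) target ground sampling distance
+of 0.5 m, the spacing parameters would be set like this:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -outputs.spacingx 0.5
+                              -outputs.spacingy -0.5
+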
+A user-defined region of interest to ortho-rectify can be specified as
+follows:
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+                              -outputs.spacingx spx
+                              -outputs.spacingy spy
+                              -outputs.ulx ul_x_coord
+                              -outputs.uly ul_y_coord
+                              -outputs.sizex x_size
+                              -outputs.sizey y_size
+
+The ``-outputs.ulx`` and ``-outputs.uly`` options specify the
+coordinates of the upper-left corner of the output image, while the
+``-outputs.sizex`` and ``-outputs.sizey`` options specify the size of
+the output image in pixels.
+
+A few more interesting options are available (a combined example is
+given after this list):
+
+-  The ``-opt.rpc`` option uses an estimated RPC model instead of the
+   rigorous SPOT5 model, which speeds up the processing,
+
+-  The ``-opt.gridspacing`` option defines the spacing of the
+   localisation grid used for ortho-rectification. A coarser grid
+   speeds up the processing, but with a potential loss of accuracy. A
+   standard value would be 10 times the ground spacing of the output
+   image.
+
+-  The ``-interpolator`` option selects the interpolation algorithm
+   among nearest neighbor, linear and bicubic. The default is nearest
+   neighbor interpolation, but bicubic should be fine in most cases.
+
+-  The ``-opt.ram`` option specifies the amount of memory available for
+   the processing (in MB). The default is 256 MB. Increasing this value
+   to fit the available memory on your computer might speed up the
+   processing.
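+
+Putting some of these options together, a hypothetical invocation (the
+numeric values are chosen purely for illustration, following the rule of
+thumb of a grid spacing of 10 times the ground spacing) could look like
+this:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -outputs.spacingx 0.5
+                              -outputs.spacingy -0.5
+                              -opt.gridspacing 5
+                              -opt.ram 1024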
+
+Residual registration
+---------------------
+
+Image registration is a fundamental problem in image processing. The aim
+is to align two or more images of the same scene often taken at
+different times, from different viewpoints, or by different sensors. It
+is a basic step for orthorectification, image stitching, image fusion,
+change detection... But this process is also critical for the stereo
+reconstruction process, in order to obtain an accurate estimation of the
+epipolar geometry.
+
+The sensor model alone is generally not sufficient to provide accurate
+image registration. Indeed, several sources of geometric distortion can
+be contained in optical remote sensing images, including earth rotation,
+platform movement, non-linearity...
+
+They result in geometric errors at the scene, image and pixel levels. It
+is critical to rectify these errors before a thematic map is
+generated, especially when the remote sensing data need to be integrated
+together with other GIS data.
+
+This figure illustrates the generic workflow in the case of image series
+registration:
+
+[Workflow diagram: the input series goes through the sensor model (using
+a DEM) to produce a geo-referenced series; homologous points are
+extracted and fed to a bundle-block adjustment, which refines the sensor
+model; a fine registration step then yields the registered series, which
+is finally map-projected into the cartographic series.]
+
+We will now illustrate this process by applying this workflow to
+register two images. This process can be easily extended to perform
+image series registration.
+
+The aim of this example is to describe how to register a Level 1
+QuickBird image over an orthorectified Pleiades image of the area of
+Toulouse, France.
+
+|image| |image| [fig:InputImagesRegistration]
+
+Extract metadata from the image reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We first dump the geometry metadata of the image we want to refine into
+a text file. In OTB, we use the extension *.geom* for this type of file.
+As you will see, the application that estimates the refined geometry
+only needs this metadata and a set of homologous points as input. The
+refinement application will create a new *.geom* file containing the
+refined geometry parameters, which can then be used for reprojection,
+for example.
+
+The use of an external *.geom* file has been available in OTB since
+release 3.16. See
+`here <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__ for
+more information.
+
+::
+
+
+    otbcli_ReadImageInfo   -in slave_image
+                           -outkwl TheGeom.geom
+
+Extract homologous points from images
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main idea of the residual registration is to estimate a second
+transformation, applied after the sensor model.
+
+The homologous points application uses an interest point detection
+method to get a set of points which match in both images.
+
+The basic idea is to use this set of homologous points to estimate a
+residual transformation between the two images.
+
+There is a wide variety of keypoint detectors in the literature. They
+detect and describe local features in images. These algorithms provide,
+for each interest point, a “feature description”. This descriptor is
+invariant to image translation, scaling and rotation, partially
+invariant to illumination changes, and robust to local geometric
+distortion. Features extracted from the input images are then matched
+against each other. These correspondences are then used to create the
+homologous points.
+
+`SIFT <http://en.wikipedia.org/wiki/Scale-invariant_feature_transform>`__
+or `SURF <http://en.wikipedia.org/wiki/SURF>`__ keypoints can be
+computed in the application. The band on which keypoints are computed
+can be set independently for both images.
+
+The application offers two modes :
+
+-  The first one is the full mode, where keypoints are extracted from
+   the full extent of both images (please note that in this mode large
+   image files are not supported).
+
+-  The second mode, called *geobins*, sets up spatial binning so as to
+   get fewer points spread across the entire image. In this mode, the
+   corresponding spatial bin in the second image is estimated using the
+   geographical transform or sensor modelling, and is padded according
+   to the user-defined precision.
+
+Moreover, in both modes the application can filter out matches whose
+co-localisation error in the first image exceeds this precision. Last,
+the elevation parameters allow sensor modelling to be handled more
+precisely in the case of sensor geometry data. The *outvector* option
+creates a vector file with segments corresponding to the localisation
+error between the matches.
+
+Finally, with the *2wgs84* option, you can match two sensor geometry
+images or a sensor geometry image with an ortho-rectified reference. In
+all cases, you get a list of ground control points spread all over your
+image.
+
+::
+
+
+
+    otbcli_HomologousPointsExtraction   -in1 slave_image
+                                        -in2 reference_image
+                                        -algorithm surf
+                                        -mode geobins
+                                        -mode.geobins.binstep 512
+                                        -mode.geobins.binsize 512
+                                        -mfilter 1
+                                        -precision 20
+                                        -2wgs84 1
+                                        -out homologous_points.txt
+                                        -outvector points.shp
+                                        -elev.dem dem_path/SRTM4-HGT/
+                                        -elev.geoid OTB-Data/Input/DEM/egm96.grd
+
+Note that for a proper use of the application, elevation must be
+correctly set (including DEM and geoid file).
+
+Geometry refinement using homologous points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We can now use this set of tie points to estimate a residual
+transformation. For this we use the dedicated application called
+**RefineSensorModel**. This application makes use of OSSIM capabilities
+to align the sensor model.
+
+It reads the input geometry metadata file (*.geom*) which contains the
+sensor model information that we want to refine and the text file
+(homologous\_points.txt) containing the list of ground control points.
+It performs a least-squares fit of the sensor model adjustable
+parameters to these tie points and produces an updated geometry file as
+output (the extension used is always *.geom*).
+
+The application can also provide an optional statistics file based on
+the ground control points, and a vector file containing the residues,
+which you can display in a GIS software.
+
+Please note again that for a proper use of the application, elevation
+must be correctly set (including DEM and geoid file). The map parameters
+allow choosing a map projection in which the accuracy will be estimated
+(in meters).
+
+Accuracy values are provided as output of the application (computed
+using the tie point locations) and also allow controlling the precision
+of the estimated model.
+
+::
+
+
+    otbcli_RefineSensorModel   -elev.dem dem_path/SRTM4-HGT/
+                               -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                               -ingeom slave_image.geom
+                               -outgeom refined_slave_image.geom
+                               -inpoints homologous_points.txt
+                               -outstat stats.txt
+                               -outvector refined_slave_image.shp
+
+Orthorectify image using the affine geometry
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now we will show how we can use this new sensor model. In our case we’ll
+use it to orthorectify the image over the Pléiades reference. Since
+version 3.16, OTB offers the possibility to use
+`extended <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__
+image paths in order to use a different metadata file as input. That’s
+what we are going to use here to orthorectify the QuickBird image with
+the *.geom* file obtained from the **RefineSensorModel** application:
+the estimated sensor model takes into account the original sensor model
+of the slave image and also fits the set of tie points.
+
+::
+
+
+    otbcli_OrthoRectification   -io.in slave_image?&geom=TheRefinedGeom.geom
+                                -io.out ortho_slave_image
+                                -elev.dem dem_path/SRTM4-HGT/
+                                -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                         
+
+As a result, if you have enough homologous points in the images and you
+have checked that the residual error between the set of tie points and
+the estimated sensor model is small, you should now achieve a good
+registration between the two rectified images, normally far better than
+’only’ performing separate orthorectifications of the two images.
+
+This methodology can be adapted and applied to several cases, for example:
+
+-  register stereo pair of images and estimate accurate epipolar
+   geometry
+
+-  registration prior to change detection
+
+.. |image| image:: ../Art/MonteverdiImages/monteverdi_optical_calibration.png
+.. |image| image:: ../Art/MonteverdiImages/monteverdi_optical_calibration_outputs.png
+.. |image| image:: ../Art/MonteverdiImages/monteverdi_QB_PAN_ROI.png
+.. |image| image:: ../Art/MonteverdiImages/monteverdi_QB_MUL_Superimpose.png
+.. |image| image:: ../Art/MonteverdiImages/monteverdi_QB_XS_pan-sharpened.png
+.. |image| image:: ../Art/MonteverdiImages/registration_pleiades_ql.png
+.. |image| image:: ../Art/MonteverdiImages/registration_quickbird_ql.png
diff --git a/Documentation/Cookbook/rst/pbclassif.rst b/Documentation/Cookbook/rst/pbclassif.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f458f0f7402053fab3bb40db805cf94b90c3c18a
--- /dev/null
+++ b/Documentation/Cookbook/rst/pbclassif.rst
@@ -0,0 +1,616 @@
+Classification
+==============
+
+Pixel based classification
+--------------------------
+
+The classification in the application framework provides a supervised
+pixel-wise classification chain based on learning from multiple images,
+and using one specified machine learning method like SVM, Bayes, KNN,
+Random Forests, Artificial Neural Network, and others (see the help of
+the *TrainImagesClassifier* application for further details about all
+the available classifiers). It
+supports huge images through streaming and multi-threading. The
+classification chain performs a training step based on the intensities
+of each pixel as features. Please note that all the input images must
+have the same number of bands to be comparable.
+
+Statistics estimation
+~~~~~~~~~~~~~~~~~~~~~
+
+In order to make these features comparable between the training images,
+the first step consists in estimating the input image statistics. These
+statistics will be used to center and reduce the intensities (mean of 0
+and standard deviation of 1) of the samples based on the vector data
+produced by the user. To do so, the *ComputeImagesStatistics* tool can
+be used:
+
+::
+
+    otbcli_ComputeImagesStatistics -il  im1.tif im2.tif im3.tif
+                                   -out images_statistics.xml
+
+This tool computes the mean of each band and the standard deviation
+based on the pooled variance of each band, and finally exports them to
+an XML file. The feature statistics XML file will be an input of the
+following tools.
+
+Building the training data set
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As the chain is supervised, we first need to build a training set with
+positive examples of different objects of interest. These polygons must
+be saved in an OGR vector format supported by GDAL, such as ESRI
+shapefile.
+
+Please note that the positive examples in the vector data should have a
+``Class`` field with a label value higher than 1, consistent across
+images.
+
+You can generate the vector data set with a GIS software and save it in
+an OGR vector format supported by GDAL (ESRI shapefile for example). OTB
+should be able to transform the vector data into the image coordinate
+system.
+
+Performing the learning scheme
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the image statistics have been estimated, the learning scheme is
+the following:
+
+#. For each input image:
+
+   #. Read the region of interest (ROI) inside the shapefile,
+
+   #. Generate validation and training data within the ROI,
+
+   #. Add vectors respectively to the training samples set and the
+      validation samples set.
+
+#. Increase the size of the training samples set and balance it by
+   generating new noisy samples from the previous ones,
+
+#. Perform the learning with this training set
+
+#. Estimate performances of the classifier on the validation samples set
+   (confusion matrix, precision, recall and F-Score).
+
+Let us consider a SVM classification. These steps can be performed by
+the command-line using the following:
+
+::
+
+    otbcli_TrainImagesClassifier -io.il      im1.tif im2.tif im3.tif
+                                 -io.vd      vd1.shp vd2.shp vd3.shp
+                                 -io.imstat  images_statistics.xml
+                                 -classifier svm (classifier_for_the_training)
+                                 -io.out     model.svm
+
+Additional groups of parameters are also available (see the application
+help for more details); a combined sketch is given after this list:
+
+-  ``-elev`` Handling of elevation (DEM or average elevation)
+
+-  ``-sample`` Group of parameters for sampling
+
+-  ``-classifier`` Classifiers to use for the training, and their
+   corresponding groups of parameters
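+
+As an illustrative sketch, and assuming that the ``-elev.dem`` and
+``-sample.vtr`` (training/validation ratio) parameters behave as in the
+other applications shown in this chapter, the previous command could be
+extended as follows:
+
+::
+
+    # -elev.dem and -sample.vtr values are illustrative assumptions
+    otbcli_TrainImagesClassifier -io.il      im1.tif im2.tif im3.tif
+                                 -io.vd      vd1.shp vd2.shp vd3.shp
+                                 -io.imstat  images_statistics.xml
+                                 -elev.dem   dem_dir
+                                 -sample.vtr 0.5
+                                 -classifier svm
+                                 -io.out     model.svm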
+
+Using the classification model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the classifier has been trained, one can apply the model to
+classify pixels into the defined classes on a new image using the
+*ImageClassifier* application:
+
+::
+
+    otbcli_ImageClassifier -in     image.tif
+                           -imstat images_statistics.xml
+                           -model  model.svm
+                           -out    labeled_image.tif
+
+You can set an input mask to limit the classification to the areas where
+the mask value is greater than 0.
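+
+For instance, assuming the corresponding parameter is named ``-mask``
+(an assumption; check the application help for the exact name), the call
+would look like this:
+
+::
+
+    # -mask parameter name is an assumption
+    otbcli_ImageClassifier -in     image.tif
+                           -mask   validity_mask.tif
+                           -imstat images_statistics.xml
+                           -model  model.svm
+                           -out    labeled_image.tif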
+
+Validating the classification model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The performance of the model generated by the *TrainImagesClassifier*
+application is directly estimated by the application itself, which
+displays the precision,
+recall and F-score of each class, and can generate the global confusion
+matrix as an output \*.CSV file.
+
+With the *ComputeConfusionMatrix* application, it is also possible to
+estimate the performance of a model from a classification map generated
+with the *ImageClassifier* application. This
+labeled image is compared to positive reference samples (either
+represented as a raster labeled image or as a vector data containing the
+reference classes). It will compute the confusion matrix and precision,
+recall and F-score of each class too, based on the
+`ConfusionMatrixCalculator <http://www.orfeo-toolbox.org/doxygen-current/classotb_1_1ConfusionMatrixCalculator.html>`__
+class.
+
+::
+
+    otbcli_ComputeConfusionMatrix -in                labeled_image.tif
+                                  -ref               vector
+                                  -ref.vector.in     vectordata.shp
+                                  -ref.vector.field  Class (name_of_label_field)
+                                  -out               confusion_matrix.csv
+
+Fancy classification results
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Color mapping can be used to apply color transformations on the final
+graylevel label image. It produces an RGB classification map by
+re-mapping the image values to make them suitable for display purposes.
+One can use the *ColorMapping* application. This tool will replace each
+label with an 8-bit RGB color specified in a mapping file. The mapping
+file should look like this:
+
+::
+
+    # Lines beginning with a # are ignored
+    1 255 0 0
+
+In the previous example, 1 is the label and 255 0 0 is an RGB color
+(this one will be rendered as red). To use the mapping tool, enter the
+following:
+
+::
+
+    otbcli_ColorMapping -in                labeled_image.tif
+                        -method            custom
+                        -method.custom.lut lut_mapping_file.txt
+                        -out               RGB_color_image.tif
+
+Other look-up tables (LUT) are available : standard continuous LUT,
+optimal LUT, and LUT computed over a support image.
+
+Example
+~~~~~~~
+
+We consider 4 classes: water, roads, vegetation and buildings with red
+roofs. Data is available in the OTB-Data
+`repository <http://hg.orfeo-toolbox.org/OTB-Data/file/0fed8f4f035c/Input/Classification>`__
+and this image is produced with the commands inside this
+`file <http://hg.orfeo-toolbox.org/OTB-Applications/file/3ce975605013/Testing/Classification/CMakeLists.txt>`__.
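+
+A purely illustrative mapping file for these 4 classes (the label values
+and colors below are hypothetical, not taken from the test data) could
+look like this:
+
+::
+
+    # hypothetical labels: water (blue), roads (gray), vegetation (green), red roofs (red)
+    1 0 0 255
+    2 128 128 128
+    3 0 255 0
+    4 255 0 0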
+
+|image| |image| |image| [fig:MeanShiftVectorImageFilter]
+
+Fusion of classification maps
+-----------------------------
+
+After having processed several classifications of the same input image
+but from different models or methods (SVM, KNN, Random Forest,...), it
+is possible to make a fusion of these classification maps with the
+*FusionOfClassifications* application, which uses either majority voting
+or the Dempster-Shafer framework to handle this fusion. The fusion of
+classifications generates a single, more robust and precise
+classification map which combines the information extracted from the
+input list of labeled images.
+
+The application has the following input parameters :
+
+-  ``-il`` list of input labeled classification images to fuse
+
+-  ``-out`` the output labeled image resulting from the fusion of the
+   input classification images
+
+-  ``-method`` the fusion method (either by majority voting or by
+   Dempster Shafer)
+
+-  ``-nodatalabel`` label for the no data class (default value = 0)
+
+-  ``-undecidedlabel`` label for the undecided class (default value = 0)
+
+The input pixels with the nodata class label are simply ignored by the
+fusion process. Moreover, the output pixels for which the fusion process
+does not result in a unique class label are set to the undecided value.
+
+Majority voting for the fusion of classifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the Majority Voting method implemented in the application, the value
+of each output pixel is equal to the most frequent class label of the
+same pixel in the input classification maps. However, it may happen that
+the most frequent class label is not unique for some pixels. In that
+case, the undecided label is assigned to those output pixels.
+
+The application can be used like this:
+
+::
+
+    otbcli_FusionOfClassifications  -il             cmap1.tif cmap2.tif cmap3.tif
+                                    -method         majorityvoting
+                                    -nodatalabel    0
+                                    -undecidedlabel 10
+                                    -out            MVFusedClassificationMap.tif
+
+Let us consider 6 independent classification maps of the same input
+image (Cf. left image in Fig. [fig:MeanShiftVectorImageFilter])
+generated from 6 different SVM models. The Fig.
+[fig:ClassificationMapFusionApplication] represents them after a color
+mapping by the same LUT. Thus, 4 classes (water: blue, roads: gray,
+vegetation: green, buildings with red roofs: red) are observable on each
+of them.
+
+|image| |image| |image| |image| |image| |image|
+[fig:ClassificationMapFusionApplication]
+
+As an example of fusion by *majority voting*, the fusion of the six
+input classification maps represented in Fig.
+[fig:ClassificationMapFusionApplication] leads to the classification map
+illustrated on the right in Fig.
+[fig:ClassificationMapFusionApplicationMV]. Thus, it appears that this
+fusion highlights the most relevant classes among the six different
+input classifications. The white parts of the fused image correspond to
+the undecided class labels, i.e. to pixels for which there is not a
+unique majority vote.
+
+|image| |image| [fig:ClassificationMapFusionApplicationMV]
+
+Dempster Shafer framework for the fusion of classifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *FusionOfClassifications* application handles another method to
+compute the fusion: the Dempster-Shafer framework. In the
+`Dempster-Shafer
+theory <http://en.wikipedia.org/wiki/Dempster-Shafer_theory>`__, the
+performance of each classifier producing the classification maps to fuse
+is evaluated with the help of the so-called *belief function* of each
+class label, which measures the degree of belief that the corresponding
+label is correctly assigned to a pixel. For each classifier, and for
+each class label, these belief functions are estimated from another
+parameter called the *mass of belief* of each class label, which
+measures the confidence that the user can have in each classifier
+according to the resulting labels.
+
+In the Dempster Shafer framework for the fusion of classification maps,
+the fused class label for each pixel is the one with the maximal belief
+function. In case of multiple class labels maximizing the belief
+functions, the output fused pixels are set to the undecided value.
+
+In order to estimate the confidence level in each classification map,
+each of them should be confronted with a ground truth. For this purpose,
+the masses of belief of the class labels resulting from a classifier are
+estimated from its confusion matrix, which is itself exported as a
+\*.CSV file with the help of the *ComputeConfusionMatrix* application.
+Thus, using the Dempster-Shafer method to fuse classification maps
+requires an additional input list of such \*.CSV files corresponding to
+their respective confusion matrices.
+
+The application can be used like this:
+
+::
+
+    otbcli_FusionOfClassifications  -il             cmap1.tif cmap2.tif cmap3.tif
+                                    -method         dempstershafer
+                                    -method.dempstershafer.cmfl
+                                                    cmat1.csv cmat2.csv cmat3.csv
+                                    -nodatalabel    0
+                                    -undecidedlabel 10
+                                    -out            DSFusedClassificationMap.tif
+
+As an example of fusion with the *Dempster-Shafer* method, the fusion of
+the six input classification maps represented in Fig.
+[fig:ClassificationMapFusionApplication] leads to the classification map
+illustrated on the right in Fig.
+[fig:ClassificationMapFusionApplicationDS]. Thus, it appears that this
+fusion gives access to a more precise and robust classification map
+based on the confidence level in each classifier.
+
+|image| |image| [fig:ClassificationMapFusionApplicationDS]
+
+Recommendations to properly use the fusion of classification maps
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to properly use the application, some points should be
+considered. First, the ``list_of_input_images`` and
+``OutputFusedClassificationImage`` are single band labeled images, which
+means that the value of each pixel corresponds to the class label it
+belongs to, and labels in each classification map must represent the
+same class. Secondly, the undecided label value must be different from
+existing labels in the input images in order to avoid any ambiguity in
+the interpretation of the ``OutputFusedClassificationImage``.
+
+Majority voting based classification map regularization
+-------------------------------------------------------
+
+Resulting classification maps can be regularized in order to smooth
+irregular classes. Such a regularization process improves classification
+results by producing more homogeneous areas, which are easier to handle.
+
+Majority voting for the classification map regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *ClassificationMapRegularization* application performs a
+regularization of a labeled input image based on the Majority Voting
+method in a specified ball-shaped neighborhood. For each center pixel,
+Majority Voting takes the most representative value of all the pixels
+identified by the structuring element and then sets the output center
+pixel to this majority label value. The ball-shaped neighborhood is
+identified by its radius expressed in pixels.
+
+Handling ambiguity and not classified pixels in the majority voting based regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since the Majority Voting regularization may lead to non-unique majority
+labels in the neighborhood, it is important to define the behavior of
+the filter in this case. For this purpose, a Boolean parameter (called
+``ip.suvbool``) is used in the application to choose whether pixels with
+more than one majority class are set to Undecided (true) or keep their
+Original labels (false = default value).
+
+Moreover, it may happen that pixels in the input image do not belong to
+any of the considered classes. Such pixels are assumed to belong to the
+NoData class, the label of which is specified as an input parameter for
+the regularization. Therefore, those NoData input pixels are invariant
+and keep their NoData label in the output regularized image.
+
+The application has the following input parameters :
+
+-  ``-io.in`` labeled input image resulting from a previous
+   classification process
+
+-  ``-io.out`` output labeled image corresponding to the regularization
+   of the input image
+
+-  ``-ip.radius`` integer corresponding to the radius of the ball shaped
+   structuring element (default value = 1 pixel)
+
+-  ``-ip.suvbool`` boolean parameter used to choose whether pixels with
+   more than one majority class are set to Undecided (true), or to their
+   Original labels (false = default value). Please note that the
+   Undecided value must be different from existing labels in the input
+   image
+
+-  ``-ip.nodatalabel`` label for the NoData class. Such input pixels
+   keep their NoData label in the output image (default value = 0)
+
+-  ``-ip.undecidedlabel`` label for the Undecided class (default value =
+   0).
+
+The application can be used like this:
+
+::
+
+    otbcli_ClassificationMapRegularization  -io.in              labeled_image.tif
+                                            -ip.radius          3
+                                            -ip.suvbool         true
+                                            -ip.nodatalabel     10
+                                            -ip.undecidedlabel  7
+                                            -io.out             regularized.tif
+
+Recommendations to properly use the majority voting based regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to properly use the application, some points should be
+considered. First, both ``InputLabeledImage`` and ``OutputLabeledImage``
+are single band labeled images, which means that the value of each pixel
+corresponds to the class label it belongs to. The ``InputLabeledImage``
+is commonly an image generated with a classification algorithm such as
+the SVM classification. Remark: both ``InputLabeledImage`` and
+``OutputLabeledImage`` are not necessarily of the same datatype.
+Secondly, if ip.suvbool == true, the Undecided label value must be
+different from existing labels in the input labeled image in order to
+avoid any ambiguity in the interpretation of the regularized
+``OutputLabeledImage``. Finally, the structuring element radius must
+have a minimum value equal to 1 pixel, which is its default value. Both
+NoData and Undecided labels have a default value equal to 0.
+
+Example
+~~~~~~~
+
+Resulting from the application presented in section
+[ssec:classificationcolormapping], and illustrated in Fig.
+[fig:MeanShiftVectorImageFilter], the Fig.
+[fig:ClassificationMapRegularizationApplication] shows a regularization
+of a classification map composed of 4 classes: water, roads, vegetation
+and buildings with red roofs. The radius of the ball shaped structuring
+element is equal to 3 pixels, which corresponds to a ball included in a
+7 x 7 pixels square. Pixels with more than one majority class keep their
+original labels.
+
+|image| |image| |image| [fig:ClassificationMapRegularizationApplication]
+
+Regression
+----------
+
+The machine learning models in OpenCV and LibSVM also support a
+regression mode : they can be used to predict a numeric value (i.e. not
+a class index) from an input predictor. The workflow is the same as
+classification. First, the regression model is trained, then it can be
+used to predict output values. The applications to do that are
+*TrainRegression* and *PredictRegression*.
+
+Input datasets
+~~~~~~~~~~~~~~
+
+The input data set for training must have the following structure :
+
+-  *n* components for the input predictors
+
+-  one component for the corresponding output value
+
+The *TrainRegression* application supports two input formats:
+
+-  An image list : each image should have components matching the
+   structure detailed earlier (*n* feature components + 1 output value)
+
+-  A CSV file : the first *n* columns are the feature components and the
+   last one is the output value
+
+If you have separate images for predictors and output values, you can
+use the *ConcatenateImages* application to stack them:
+
+::
+
+    otbcli_ConcatenateImages  -il features.tif  output_value.tif
+                              -out training_set.tif
+
+Statistics estimation
+~~~~~~~~~~~~~~~~~~~~~
+
+As in classification, a statistics estimation step can be performed
+before training. It normalizes the dynamics of the input predictors to a
+standard one: zero mean, unit standard deviation. The main difference
+with the classification case is that in regression, the dynamics of the
+output values can also be reduced.
+
+The statistics file format is identical to the output file of the
+*ComputeImagesStatistics* application, for instance:
+
+::
+
+    <?xml version="1.0" ?>
+    <FeatureStatistics>
+        <Statistic name="mean">
+            <StatisticVector value="198.796" />
+            <StatisticVector value="283.117" />
+            <StatisticVector value="169.878" />
+            <StatisticVector value="376.514" />
+        </Statistic>
+        <Statistic name="stddev">
+            <StatisticVector value="22.6234" />
+            <StatisticVector value="41.4086" />
+            <StatisticVector value="40.6766" />
+            <StatisticVector value="110.956" />
+        </Statistic>
+    </FeatureStatistics>
+
+In the *TrainRegression* application, normalization of input predictors
+and output values is optional. There are three options:
+
+-  No statistic file : normalization disabled
+
+-  Statistic file with *n* components : normalization enabled for input
+   predictors only
+
+-  Statistic file with *n+1* components : normalization enabled for
+   input predictors and output values
+
+If you use an image list as training set, you can run the
+*ComputeImagesStatistics* application. It will produce a statistics file
+suitable for input and output normalization (third option).
+
+::
+
+    otbcli_ComputeImagesStatistics  -il   training_set.tif
+                                    -out  stats.xml
+
+Training
+~~~~~~~~
+
+Initially, the machine learning models in OTB were only used for
+classification. But since they come from external libraries (OpenCV and
+LibSVM) in which the regression mode was already implemented, the
+integration of these models in OTB has been improved in order to allow
+the use of the regression mode. As a consequence, the machine learning
+models have nearly the same set of parameters for the classification and
+regression modes.
+
+The regression mode is currently supported for :
+
+-  Support Vector Machine (LibSVM and OpenCV)
+
+-  Decision Trees
+
+-  Gradient Boosted Trees
+
+-  Neural Network
+
+-  Random Forests
+
+-  K-Nearest Neighbors
+
+The behaviour of the *TrainRegression* application is very similar to
+that of *TrainImagesClassifier*. From the input data set, a portion of
+the samples is used for training, whereas the other part is used for
+validation. The user may also set the model to train and its parameters.
+Once the training is done, the model is stored in an output file.
+
+::
+
+    otbcli_TrainRegression  -io.il                training_set.tif
+                            -io.imstat            stats.xml
+                            -io.out               model.txt
+                            -sample.vtr           0.5
+                            -classifier           knn
+                            -classifier.knn.k     5
+                            -classifier.knn.rule  median
+
+Prediction
+~~~~~~~~~~
+
+Once the model is trained, it can be used in the *PredictRegression*
+application to perform the prediction on an entire image containing
+input predictors (i.e. an image with only *n* feature components). If
+the model was trained with normalization, the same statistics file must
+be used for prediction. The behavior of *PredictRegression* with respect
+to the statistics file is identical to that of *TrainRegression*:
+
+-  no statistic file : normalization off
+
+-  *n* components : input only
+
+-  *n+1* components : input and output
+
+The model to use is read from file (the one produced during training).
+
+::
+
+    otbcli_PredictRegression  -in     features_bis.tif
+                              -model  model.txt
+                              -imstat stats.xml
+                              -out    prediction.tif
+
+Samples selection
+-----------------
+
+Since release 5.4, new functionalities related to the handling of the
+vectors from the training data set (see also [sssec:building]) were
+added to OTB.
+
+The first improvement was provided by the application
+*PolygonClassStatistics*. This application processes a set of training
+geometries, and outputs statistics about the sample distribution in the
+input geometries (in the form of an XML file):
+
+-  number of samples per class
+
+-  number of samples per geometry
+
+Supported geometries are polygons, lines and points; depending on the
+geometry type, this application behaves differently :
+
+-  polygon : select pixels whose center is inside the polygon
+
+-  lines : select pixels intersecting the line
+
+-  points : select closest pixel to the provided point
+
+The application also takes as input a support image, but the values of
+its pixels are not used. The purpose is rather to define the image grid
+that will later provide the samples. The user can also provide a raster
+mask, which will be used to discard pixel positions.
+
+A simple use of the application PolygonClassStatistics could be as
+follows :
+
+::
+
+    otbcli_PolygonClassStatistics  -in     support_image.tif
+                                   -vec    variousTrainingVectors.sqlite
+                                   -field  class
+                                   -out    polygonStat.xml
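+
+If a raster validity mask is available, it can be passed as well; a
+hedged sketch, assuming the corresponding parameter is named ``-mask``
+(check the application help for the exact name):
+
+::
+
+    # -mask parameter name is an assumption
+    otbcli_PolygonClassStatistics  -in     support_image.tif
+                                   -mask   validity_mask.tif
+                                   -vec    variousTrainingVectors.sqlite
+                                   -field  class
+                                   -out    polygonStat.xml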
+
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_fusion.jpg
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif.jpg
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C1_CM.png
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C2_CM.png
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C3_CM.png
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C4_CM.png
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C5_CM.png
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_C6_CM.png
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_MV_C123456_CM.png
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image| image:: ../Art/MonteverdiImages/QB_1_ortho_DS_V_P_C123456_CM.png
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_CMR_input.png
+.. |image| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_CMR_3.png
diff --git a/Documentation/Cookbook/rst/recipes/bandmathx.rst b/Documentation/Cookbook/rst/recipes/bandmathx.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3284d98e3c88431e6c046d23f9af682b7ca61aa6
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/bandmathx.rst
@@ -0,0 +1,515 @@
+.. highlight:: c++
+
+.. role:: math(raw)
+   :format: html latex
+..
+
+BandMathImageFilterX (based on muParserX)
+=========================================
+
+This section describes how to use the BandMathImageFilterX.
+
+Fundamentals: headers, declaration and instantiation
+----------------------------------------------------
+
+A simple example is given below:
+
+::
+
+
+    #include "otbBandMathImageFilterX.h"
+    #include "otbVectorImage.h"
+
+    int otbBandMathImageFilterXNew( int itkNotUsed(argc), char* itkNotUsed(argv) [])
+    {
+        typedef double                                                      PixelType;
+        typedef otb::VectorImage<PixelType, 2>                      ImageType;
+        typedef otb::BandMathImageFilterX<ImageType>                FilterType;
+
+        FilterType::Pointer         filter       = FilterType::New();
+
+        return EXIT_SUCCESS;
+    }
+
+As we can see, the new band math filter works with the class
+otb::VectorImage.
+
+Syntax : first elements
+-----------------------
+
+The default prefix name for variables related to the ith input is
+*im(i+1)* (note the indexing from 1 to N, for N inputs). The user can
+change this default behaviour by setting their own prefix.
+
+::
+
+
+    // All variables related to image1 (input 0) will have the prefix im1 
+    filter->SetNthInput(0, image1);         
+
+    // All variables related to image2 (input 1) will have the prefix  toulouse   
+    filter->SetNthInput(1, image2, "toulouse");   
+
+    // All variables related to anotherImage (input 2) will have the prefix im3
+    filter->SetNthInput(2, anotherImage);      
+
+In this document, we will keep the default convention. The following
+list summarises the available variables for input #0 (and so on for
+every input).
+
+Variables and their descriptions:
+
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| Variables             | Description                                                                          | Type     |
++=======================+======================================================================================+==========+
+| im1                   | a pixel from first input, made of n components/bands (first image is indexed by 1)   | Vector   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bj                 | jth component of a pixel from first input (first band is indexed by 1)               | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjNkxp             | a neighbourhood (”N”) of pixels of the jth component from first input, of size kxp   | Matrix   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjMini             | global statistic : minimum of the jth band from first input                          | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjMaxi             | global statistic : maximum of the jth band from first input                          | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjMean             | global statistic : mean of the jth band from first input                             | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjSum              | global statistic : sum of the jth band from first input                              | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1bjVar              | global statistic : variance of the jth band from first input                         | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+| im1PhyX and im1PhyY   | spacing of first input in X and Y directions                                         | Scalar   |
++-----------------------+--------------------------------------------------------------------------------------+----------+
+
+[variables]
+
+Moreover, we also have the generic variables idxX and idxY that
+represent the indices of the current pixel (scalars).
+
+Note that the use of a global statistic will automatically make the
+filter (or the application) request the largest possible region from the
+concerned input images, without user intervention.
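+
+As an illustration (an expression written for this guide, not taken from
+the toolbox itself), the following formula centres and reduces the first
+band of the first input using these global statistics:
+
+.. math:: (im1b1-im1b1Mean)/sqrt(im1b1Var)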
+
+For instance, the following formula (addition of two pixels)
+
+.. math:: im1+im2
+
+[firstequation]
+
+is correct only if the two first inputs have the same number of bands.
+In addition, the following formula is not consistent even if im1
+represents a pixel of an image made of only one band:
+
+.. math:: im1+1
+
+A scalar can’t be added to a vector. The right formula is instead (note
+the way muParserX allows vectors to be defined on the fly):
+
+.. math:: im1+\{ 1 \}
+
+or
+
+.. math:: im1 + \{1,1,1,...,1\}
+
+if im1 is made of n components.
+
+On the other hand, the variable im1b1 for instance is represented as a
+scalar; so we have the following different possibilities:
+
+Correct / incorrect expressions:
+
++-----------------------+---------------------------------------------------------------------------------+
+| Expression            | Status                                                                          |
++=======================+=================================================================================+
+| im1b1 + 1             | correct                                                                         |
++-----------------------+---------------------------------------------------------------------------------+
+| {im1b1} + {1}         | correct                                                                         |
++-----------------------+---------------------------------------------------------------------------------+
+| im1b1 + {1}           | incorrect                                                                       |
++-----------------------+---------------------------------------------------------------------------------+
+| {im1b1} + 1           | incorrect                                                                       |
++-----------------------+---------------------------------------------------------------------------------+
+| im1 + {im2b1,im2b2}   | correct if im1 represents a pixel of two components (equivalent to im1 + im2)   |
++-----------------------+---------------------------------------------------------------------------------+
+
+
+Similar remarks can be made for the multiplication/division; for
+instance, the following formula is incorrect:
+
+.. math:: \{im2b1,im2b2\} * \{1,2\}
+
+whereas this one is correct:
+
+.. math:: \{im2b1,im2b2\} * \{1,2\}'
+
+or in more simple terms (and only if im2 contains two components):
+
+.. math:: im2* \{1,2\}'
+
+Concerning division, this operation is not originally defined between
+two vectors (see next section “New operators and functions”
+-[ssec:operators]-).
+
+Now, let’s go back to the first formula: this one specifies the addition
+of two images band by band. With the muParserX library, we can now
+define such an operation with only one formula, instead of many formulas
+(as many as the number of bands). We call this new functionality the
+**batch mode**, which directly arises from the introduction of vectors
+within the muParserX framework.
+
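+In practice (a minimal sketch, reusing the filter declared in the first
+example and assuming two inputs with the same number of bands), the
+batch-mode addition is simply written:
+
+::
+
+    // One single expression adds the two inputs band by band (batch mode)
+    filter->SetExpression("im1+im2");
+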
+Finally, let’s say a few words about neighbourhood variables. These
+variables are defined for each particular input, and for each particular
+band. The two last numbers, kxp, indicate the size of the neighbourhood.
+All neighbourhoods are centred: this means that k and p can only be odd
+numbers. Moreover, k represents the dimension in the x direction (number
+of columns), and p the dimension in the y direction (number of rows).
+For instance, im1b3N3x5 represents the following neighbourhood:
+
++-----+-----+-----+
+| .   | .   | .   |
++=====+=====+=====+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+| .   | .   | .   |
++-----+-----+-----+
+
+[correctness]
+
+Fundamentally, a neighbourhood is represented as a matrix inside the
+muParserX framework; so the remark about mathematically well-defined
+formulas still stands.
+
+New operators and functions
+---------------------------
+
+New operators and functions have been implemented within
+BandMathImageFilterX. These ones can be divided into two categories.
+
+-  adaptation of existing operators/functions that were not originally
+   defined for vectors and matrices (for instance cos, sin, ...). These
+   new operators/functions keep the original names, to which we add the
+   prefix “v” for vector (vcos, vsin, ...).
+
+-  truly new operators/functions.
+
+Concerning the last category, here is a list of implemented operators or
+functions (they are all implemented in otbParserXPlugins.h/.cxx files
+-OTB/Code/Common-):
+
+**Operators div and dv** The first operator allows the definition of an
+element-wise division of two vectors (and even matrices), provided that
+they have the same dimensions. The second one allows the definition of
+the division of a vector/matrix by a scalar (components are divided by
+the same unique value). For instance:
+
+.. math:: im1 ~ div ~ im2
+
+.. math:: im1 ~ dv ~ 2.0
+
+**Operators mult and mlt** These operators are the duals of the previous
+ones. For instance:
+
+.. math:: im1 ~  mult ~ im2
+
+.. math:: im1 ~  mlt ~ 2.0
+
+Note that the operator ’\*’ could have been used instead of the ’mult’
+one. But ’mult’ is a little bit more permissive, and can tolerate a
+one-dimensional vector as the right-hand element.
+
+**Operators pow and pw** The first operator allows the definition of an
+element-wise exponentiation of two vectors (and even matrices), provided
+that they have the same dimensions. The second one allows the definition
+of the exponentiation of a vector/matrix by a scalar (components are
+exponentiated by the same unique value). For instance:
+
+.. math:: im1 ~ pow ~ im2
+
+.. math:: im1 ~ pw ~ 2.0
+
+**Function bands** This function selects specific bands from an image,
+and/or rearranges them into a new vector; for instance:
+
+.. math:: bands(im1,\{1,2,1,1\})
+
+produces a vector of 4 components made of band 1, band 2, band 1 and
+band 1 values from the first input. Note that curly brackets must be
+used in order to select the desired band indices.
+
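+At the application level, the same expression engine is exposed; a
+hedged sketch, assuming the *BandMathX* application and its ``-il``,
+``-exp`` and ``-out`` parameters (file names are hypothetical):
+
+::
+
+    # parameter names are assumptions based on the BandMathX application
+    otbcli_BandMathX  -il  input_image.tif
+                      -exp "bands(im1,{1,2,1,1})"
+                      -out reordered_bands.tif
+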
+**Function dotpr** This function computes the dot product between two
+vectors or matrices (actually in our case, a kernel and a neighbourhood
+of pixels):
+
+.. math:: \sum_{(i,j)} m_1(i,j)*m_2(i,j)
+
+For instance:
+
+.. math:: dotpr(kernel1,im1b1N3x5)
+
+is correct provided that kernel1 and im1b1N3x5 have the same dimensions.
+The function can take as many neighbourhoods as needed in inputs.
+
+**Function mean** This function computes the mean value of a given
+vector or neighborhood (the function can take as many inputs as needed;
+one mean value is computed per input). For instance:
+
+.. math:: mean(im1b1N3x3,im1b2N3x3,im1b3N3x3,im1b4N3x3)
+
+Note: a limitation coming from muParserX itself makes it impossible to
+pass all those neighborhoods with a unique variable.
+
+**Function var** This function computes the variance of a given vector
+or neighborhood (the function can take as many inputs as needed; one
+variance value is computed per input). For instance:
+
+.. math:: var(im1b1N3x3)
+
+**Function median** This function computes the median value of a given
+vector or neighborhood (the function can take as many inputs as needed;
+one median value is computed per input). For instance:
+
+.. math:: median(im1b1N3x3)
+
+**Function corr** This function computes the correlation between two
+vectors or matrices of the same dimensions (the function takes two
+inputs). For instance:
+
+.. math:: corr(im1b1N3x3,im1b2N3x3)
+
+**Function maj** This function computes the most represented element
+within a vector or a matrix (the function can take as many inputs as
+needed; one maj element value is computed per input). For instance:
+
+.. math:: maj(im1b1N3x3,im1b2N3x3)
+
+**Function vmin and vmax** These functions compute the min or max value
+of a given vector or neighborhood (only one input). For instance:
+
+.. math:: (vmax(im3b1N3x5)+vmin(im3b1N3x5)) ~ div ~ \{2.0\}
+
+**Function cat** This function concatenates the results of several
+expressions into a multidimensional vector, whatever their respective
+dimensions (the function can take as many inputs as needed). For
+instance:
+
+.. math:: cat(im3b1,vmin(im3b1N3x5),median(im3b1N3x5),vmax(im3b1N3x5))
+
+Note: the user should prefer the use of semi-colons (;) when setting
+expressions, instead of using this function directly. The filter or the
+application will call the function ’cat’ automatically. For instance:
+
+.. math:: filter->SetExpression("im3b1 ; vmin(im3b1N3x5) ; median(im3b1N3x5) ; vmax(im3b1N3x5)");
+
+Please, also refer to the next section “Application Programming
+Interface” ([ssec:API]).
+
+**Function ndvi** This function implements the classical normalized
+difference vegetation index; it takes two inputs. For instance:
+
+.. math:: ndvi(im1b1,im1b4)
+
+The first argument is related to the visible red band, and the second
+one to the near-infrared band.
+
+The table below summarises the different functions and operators.
+
+Functions and operators summary:
+
++----------------+-------------------------------------------------------------------------------+
+| Variables      | Remark                                                                        |
++================+===============================================================================+
+| ndvi           | two inputs                                                                    |
++----------------+-------------------------------------------------------------------------------+
+| bands          | two inputs; length of second vector input gives the dimension of the output   |
++----------------+-------------------------------------------------------------------------------+
+| dotpr          | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| cat            | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| mean           | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| var            | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| median         | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| maj            | many inputs                                                                   |
++----------------+-------------------------------------------------------------------------------+
+| corr           | two inputs                                                                    |
++----------------+-------------------------------------------------------------------------------+
+| div and dv     | operators                                                                     |
++----------------+-------------------------------------------------------------------------------+
+| mult and mlt   | operators                                                                     |
++----------------+-------------------------------------------------------------------------------+
+| pow and pw     | operators                                                                     |
++----------------+-------------------------------------------------------------------------------+
+| vnorm          | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vabs           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vmin           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vmax           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vcos           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vsin           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vtan           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vtanh          | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vsinh          | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vcosh          | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vlog           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vlog10         | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vexp           | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+| vsqrt          | adapation of an existing function to vectors : one input                      |
++----------------+-------------------------------------------------------------------------------+
+
+[variables]
+
+Application Programming Interface (API)
+---------------------------------------
+
+In this section, we make some comments about the public member functions
+of the new band math filter.
+
+::
+
+    /** Set the nth filter input with or without a specified associated variable name */
+    void SetNthInput( unsigned int idx, const ImageType * image);
+    void SetNthInput( unsigned int idx, const ImageType * image, const std::string& varName);
+
+    /** Return a pointer on the nth filter input */
+    ImageType * GetNthInput(unsigned int idx);
+
+Refer to the section “Syntax : first elements” ([ssec:syntax]), where
+the first two functions have already been commented. The purpose of the
+GetNthInput function is self-explanatory.
+
+::
+
+    /** Set an expression to be parsed */
+    void SetExpression(const std::string& expression);
+
+Each time the function SetExpression is called, a new expression is
+pushed inside the filter. **There are as many outputs as there are
+expressions. The dimensions of the outputs (number of bands) are totally
+dependent on the dimensions of the related expressions (see also last
+remark of the section “Syntax : first element” -[ssec:syntax]-).** Thus,
+the filter always performs a pre-evaluation of each expression, in order
+to guess how to allocate the outputs.
+
+The concatenation of the results of several expressions (whose results
+can have different dimensions) into one unique output is possible. For
+that purpose, semi-colons (“;”) are used as separating characters. For
+instance:
+
+::
+
+    filter->SetExpression("im1 + im2 ; im1b1*im2b1");
+
+will produce a single output (the whole string counts as one expression)
+with many bands (actually, the number of bands of im1 plus 1).
+
+::
+
+    /** Return the nth expression to be parsed */
+    std::string GetExpression(int) const;
+
+This function allows the user to get any expression by its ID number.
+
+::
+
+    /** Set a matrix (or a vector) */
+    void SetMatrix(const std::string& name, const std::string& definition);
+
+This function allows the user to set new vectors or matrices. This is
+particularly useful when the user wants to use the dotpr function (see
+previous section). First argument is related to the name of the
+variable, and the second one to the definition of the vector/matrix. The
+definition is done by a string, where first and last elements must be
+curly brackets (“{” and “}”). Different elements of a row are separated
+by commas (“,”), and different rows are separated by semi-colons (“;”).
+For instance:
+
+::
+
+    filter->SetMatrix("kernel1","{ 0.1 , 0.2 , 0.3 ; 0.4 , 0.5 , 0.6 ; \
+    0.7 , 0.8 , 0.9 ; 1.0 , 1.1 , 1.2 ; 1.3 , 1.4 , 1.5 }");
+
+defines kernel1, whose elements are given as follows:
+
++-------+-------+-------+
+| 0,1   | 0,2   | 0,3   |
++=======+=======+=======+
+| 0,4   | 0,5   | 0,6   |
++-------+-------+-------+
+| 0,7   | 0,8   | 0,9   |
++-------+-------+-------+
+| 1,0   | 1,1   | 1,2   |
++-------+-------+-------+
+| 1,3   | 1,4   | 1,5   |
++-------+-------+-------+
+
+Definition of kernel1.
+
+
+[correctness]
+
+::
+
+    /** Set a constant */
+    void SetConstant(const std::string& name, double value);
+
+This function allows the user to set new constants.
+
+::
+
+    /** Return the variable and constant names */
+    std::vector<std::string> GetVarNames() const;
+
+This function allows the user to get the list of the variable and
+constant names, in the form of a std::vector of strings.
+
+::
+
+      /** Import constants and expressions from a given filename */
+      void ImportContext(const std::string& filename);
+
+This function allows the user to define new constants and/or expressions
+(context) by using a txt file with a specific syntax. For the definition
+of constants, the following pattern must be observed: #type name value.
+For instance:
+
+::
+
+    #F expo 1.1
+    #M kernel1 { 0.1 , 0.2 , 0.3 ; 0.4 , 0.5 , 0.6 ; 0.7 , 0.8 , 0.9 ; 1 , 1.1 , 1.2 ; 1.3 , 1.4 , 1.5 }
+
+As we can see, #I/#F allows the definition of an integer/float constant,
+whereas #M allows the definition of a vector/matrix. It is also possible
+to define expressions within the same txt file, with the pattern #E
+expr. For instance:
+
+::
+
+    #F expo 1.1
+    #M kernel1 { 0.1 , 0.2 , 0.3 ; 0.4 , 0.5 , 0.6 ; 0.7 , 0.8 , 0.9 ; 1 , 1.1 , 1.2 ; 1.3 , 1.4 , 1.5 }
+    #E dotpr(kernel1,im1b1N3x5)
+
+::
+
+      /** Export constants and expressions to a given filename */
+      void ExportContext(const std::string& filename);
+
+This function allows the user to export a text file that saves their
+favorite constant or expression definitions. Such a file will be
+reusable by the ImportContext function (see above).
+
+Please also refer to the section dedicated to the applications.
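+
+As a hedged illustration of the link between this API and the application
+side, the following Python sketch evaluates several semi-colon separated
+expressions at once; the *BandMathX* application name and its ``il``,
+``out`` and ``exp`` parameter keys are assumptions here, and file names
+are placeholders:
+
+::
+
+    import otbApplication as otb
+
+    # Each semi-colon separated sub-expression becomes a band of the single output
+    bmx = otb.Registry.CreateApplication("BandMathX")
+    bmx.SetParameterStringList("il", ["input_image.tif"])
+    bmx.SetParameterString("exp",
+        "im1b1 ; vmin(im1b1N3x5) ; median(im1b1N3x5) ; vmax(im1b1N3x5)")
+    bmx.SetParameterString("out", "neighborhood_stats.tif")
+    # A context file like the one described above could presumably be passed
+    # through an "incontext" parameter (assumption, check the application help)
+    bmx.ExecuteAndWriteOutput()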
diff --git a/Documentation/Cookbook/rst/recipes/featextract.rst b/Documentation/Cookbook/rst/recipes/featextract.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8e4c73c38653d50a89f5d7f493526ed6ecb1192f
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/featextract.rst
@@ -0,0 +1,526 @@
+Feature extraction
+==================
+
+As described in the OTB Software Guide, the term *Feature Extraction*
+refers to techniques aiming at extracting added-value information from
+images. These extracted items, named *features*, can be local statistical
+moments, edges, radiometric indices, morphological and textural
+properties. For example, such features can be used as input data for
+other image processing methods like *Segmentation* and *Classification*.
+
+Local statistics extraction
+---------------------------
+
+This application computes the 4 local statistical moments on every pixel
+in the selected channel of the input image, over a specified
+neighborhood. The output image is multi band with one statistical moment
+(feature) per band. Thus, the 4 output features are the Mean, the
+Variance, the Skewness and the Kurtosis. They are provided in this exact
+order in the output image.
+
+The *LocalStatisticExtraction* application has the following input
+parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-radius`` the computational window radius (default value is 3
+   pixels)
+
+-  ``-out`` the output image containing the local statistical moments
+
+The application can be used like this:
+
+::
+
+    otbcli_LocalStatisticExtraction  -in        InputImage
+                                     -channel   1
+                                     -radius    3
+                                     -out       OutputImage
+
+Edge extraction
+---------------
+
+This application computes edge features on every pixel in the selected
+channel of the input image.
+
+The *EdgeExtraction* application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-filter`` the choice of edge detection method (gradient/sobel/touzi)
+   (default value is gradient)
+
+-  ``(-filter.touzi.xradius)`` the X Radius of the Touzi processing
+   neighborhood (only if filter==touzi) (default value is 1 pixel)
+
+-  ``(-filter.touzi.yradius)`` the Y Radius of the Touzi processing
+   neighborhood (only if filter==touzi) (default value is 1 pixel)
+
+-  ``-out`` the output mono band image containing the edge features
+
+The application can be used like this:
+
+::
+
+    otbcli_EdgeExtraction  -in        InputImage
+                           -channel   1
+                           -filter    sobel
+                           -out       OutputImage
+
+or like this if filter==touzi:
+
+::
+
+    otbcli_EdgeExtraction  -in                    InputImage
+                           -channel               1
+                           -filter                touzi
+                           -filter.touzi.xradius  2
+                           -filter.touzi.yradius  2 
+                           -out                   OutputImage
+
+Radiometric indices extraction
+------------------------------
+
+This application computes radiometric indices using the channels of the
+input image. The output is a multi band image in which each channel is
+one of the selected indices.
+
+The *RadiometricIndices* application has the following input parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-out`` the output image containing the radiometric indices
+
+-  ``-channels.blue`` the Blue channel index in the input image (default
+   value is 1)
+
+-  ``-channels.green`` the Green channel index in the input image
+   (default value is 1)
+
+-  ``-channels.red`` the Red channel index in the input image (default
+   value is 1)
+
+-  ``-channels.nir`` the Near Infrared channel index in the input image
+   (default value is 1)
+
+-  ``-channels.mir`` the Mid-Infrared channel index in the input image
+   (default value is 1)
+
+-  ``-list`` the list of available radiometric indices (default value is
+   Vegetation:NDVI)
+
+The available radiometric indices that can be listed in -list, with their
+relevant channels in brackets, are:
+
+::
+
+    Vegetation:NDVI - Normalized difference vegetation index (Red, NIR)
+    Vegetation:TNDVI - Transformed normalized difference vegetation index (Red, NIR)
+    Vegetation:RVI - Ratio vegetation index (Red, NIR)
+    Vegetation:SAVI - Soil adjusted vegetation index (Red, NIR)
+    Vegetation:TSAVI - Transformed soil adjusted vegetation index (Red, NIR)
+    Vegetation:MSAVI - Modified soil adjusted vegetation index (Red, NIR)
+    Vegetation:MSAVI2 - Modified soil adjusted vegetation index 2 (Red, NIR)
+    Vegetation:GEMI - Global environment monitoring index (Red, NIR)
+    Vegetation:IPVI - Infrared percentage vegetation index (Red, NIR)
+
+    Water:NDWI - Normalized difference water index (Gao 1996) (NIR, MIR)
+    Water:NDWI2 - Normalized difference water index (Mc Feeters 1996) (Green, NIR)
+    Water:MNDWI - Modified normalized difference water index (Xu 2006) (Green, MIR)
+    Water:NDPI - Normalized difference pond index (Lacaux et al.) (MIR, Green)
+    Water:NDTI - Normalized difference turbidity index (Lacaux et al.) (Red, Green)
+
+    Soil:RI - Redness index (Red, Green)
+    Soil:CI - Color index (Red, Green)
+    Soil:BI - Brightness index (Red, Green)
+    Soil:BI2 - Brightness index 2 (NIR, Red, Green)
+
+The application can be used like this, which leads to an output image
+with 3 bands, respectively with the Vegetation:NDVI, Vegetation:RVI and
+Vegetation:IPVI radiometric indices in this exact order:
+
+::
+
+    otbcli_RadiometricIndices -in             InputImage
+                              -out            OutputImage
+                              -channels.red   3
+                              -channels.green 2
+                              -channels.nir   4
+                              -list           Vegetation:NDVI Vegetation:RVI
+                                              Vegetation:IPVI 
+
+or like this, which leads to a single band output image with the
+Water:NDWI2 radiometric index:
+
+::
+
+    otbcli_RadiometricIndices -in             InputImage
+                              -out            OutputImage
+                              -channels.red   3
+                              -channels.green 2
+                              -channels.nir   4
+                              -list           Water:NDWI2 
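+
+The same processing can also be scripted through the Python wrapping; the
+following minimal sketch reuses the parameter keys shown above (file names
+are placeholders, and the string-list setter is assumed to be accepted for
+the ``list`` parameter):
+
+::
+
+    import otbApplication as otb
+
+    indices = otb.Registry.CreateApplication("RadiometricIndices")
+    indices.SetParameterString("in", "InputImage.tif")
+    indices.SetParameterString("out", "OutputImage.tif")
+    indices.SetParameterInt("channels.red", 3)
+    indices.SetParameterInt("channels.green", 2)
+    indices.SetParameterInt("channels.nir", 4)
+    # One output band per requested index, in the given order
+    indices.SetParameterStringList("list", ["Vegetation:NDVI",
+                                            "Vegetation:RVI",
+                                            "Vegetation:IPVI"])
+    indices.ExecuteAndWriteOutput()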
+
+Morphological features extraction
+---------------------------------
+
+Morphological features can be highlighted by using image filters based
+on mathematical morphology either on binary or gray scale images.
+
+Binary morphological operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application performs binary morphological operations (dilation,
+erosion, opening and closing) on a mono band image with a specific
+structuring element (a ball or a cross) having one radius along X and
+another one along Y. NB: the cross shaped structuring element has a
+fixed radius equal to 1 pixel in both X and Y directions.
+
+The *BinaryMorphologicalOperation* application has the following input
+parameters:
+
+-  ``-in`` the input image to be filtered
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-structype`` the choice of the structuring element type
+   (ball/cross) (default value is ball)
+
+-  ``(-structype.ball.xradius)`` the ball structuring element X Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``(-structype.ball.yradius)`` the ball structuring element Y Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``-filter`` the choice of the morphological operation
+   (dilate/erode/opening/closing) (default value is dilate)
+
+-  ``(-filter.dilate.foreval)`` the foreground value for the dilation
+   (idem for filter.erode/opening/closing) (default value is 1)
+
+-  ``(-filter.dilate.backval)`` the background value for the dilation
+   (idem for filter.erode/opening/closing) (default value is 0)
+
+-  ``-out`` the output filtered image
+
+The application can be used like this:
+
+::
+
+    otbcli_BinaryMorphologicalOperation  -in                     InputImage
+                                         -channel                1
+                                         -structype              ball
+                                         -structype.ball.xradius 10
+                                         -structype.ball.yradius 5
+                                         -filter                 opening
+                                         -filter.opening.foreval 1.0
+                                         -filter.opening.backval 0.0
+                                         -out                    OutputImage
+
+Gray scale morphological operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application performs morphological operations (dilation, erosion,
+opening and closing) on a gray scale mono band image with a specific
+structuring element (a ball or a cross) having one radius along X and
+another one along Y. NB: the cross shaped structuring element has a
+fixed radius equal to 1 pixel in both X and Y directions.
+
+The *GrayScaleMorphologicalOperation* application has the following
+input parameters:
+
+-  ``-in`` the input image to be filtered
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-structype`` the choice of the structuring element type
+   (ball/cross) (default value is ball)
+
+-  ``(-structype.ball.xradius)`` the ball structuring element X Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``(-structype.ball.yradius)`` the ball structuring element Y Radius
+   (only if structype==ball) (default value is 5 pixels)
+
+-  ``-filter`` the choice of the morphological operation
+   (dilate/erode/opening/closing) (default value is dilate)
+
+-  ``-out`` the output filtered image
+
+The application can be used like this:
+
+::
+
+    otbcli_GrayScaleMorphologicalOperation  -in                     InputImage
+                                            -channel                1
+                                            -structype              ball
+                                            -structype.ball.xradius 10
+                                            -structype.ball.yradius 5
+                                            -filter                 opening
+                                            -out                    OutputImage
+
+Textural features extraction
+----------------------------
+
+Texture features can be extracted with the help of image filters based
+on texture analysis methods like Haralick and structural feature set
+(SFS).
+
+Haralick texture features
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This application computes Haralick, advanced and higher order texture
+features on every pixel in the selected channel of the input image. The
+output image is multi band with a feature per band.
+
+The *HaralickTextureExtraction* application has the following input
+parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-texture`` the texture set selection [simple/advanced/higher]
+   (default value is simple)
+
+-  ``-parameters.min`` the input image minimum (default value is 0)
+
+-  ``-parameters.max`` the input image maximum (default value is 255)
+
+-  ``-parameters.xrad`` the X Radius of the processing neighborhood
+   (default value is 2 pixels)
+
+-  ``-parameters.yrad`` the Y Radius of the processing neighborhood
+   (default value is 2 pixels)
+
+-  ``-parameters.xoff`` the :math:`\Delta`\ X Offset for the
+   co-occurrence computation (default value is 1 pixel)
+
+-  ``-parameters.yoff`` the :math:`\Delta`\ Y Offset for the
+   co-occurrence computation (default value is 1 pixel)
+
+-  ``-parameters.nbbin`` the number of bins per axis for histogram
+   generation (default value is 8)
+
+-  ``-out`` the output multi band image containing the selected texture
+   features (one feature per band)
+
+The available values for -texture with their relevant features are:
+
+-  ``-texture=simple:`` In this case, 8 local Haralick texture features
+   will be processed. The 8 output image channels are: Energy, Entropy,
+   Correlation, Inverse Difference Moment, Inertia, Cluster Shade,
+   Cluster Prominence and Haralick Correlation. They are provided in
+   this exact order in the output image. Thus, this application computes
+   the following Haralick textures over a neighborhood with user defined
+   radius. To improve the speed of computation, a variant of the Grey
+   Level Co-occurrence Matrix (GLCM) called Grey Level Co-occurrence
+   Indexed List (GLCIL) is used. Given below is the mathematical
+   explanation of the computation of each texture. Each element of the
+   GLCIL is a pair made of a cell index (i, j), taken from the pixel
+   pairs at the given offset in the neighborhood window, and its
+   frequency; :math:`g(i, j)` is the frequency value of the pair with
+   index (i, j).
+
+   “Energy” :math:`= f_1 = \sum_{i, j}g(i, j)^2`
+
+   “Entropy” :math:`= f_2 = -\sum_{i, j}g(i, j) \log_2 g(i, j)`, or 0
+   if :math:`g(i, j) = 0`
+
+   “Correlation”
+   :math:`= f_3 = \sum_{i, j}\frac{(i - \mu)(j - \mu)g(i, j)}{\sigma^2}`
+
+   “Inverse Difference Moment”
+   :math:`= f_4 = \sum_{i, j}\frac{1}{1 + (i - j)^2}g(i, j)`
+
+   “Inertia” :math:`= f_5 = \sum_{i, j}(i - j)^2g(i, j)` (sometimes
+   called “contrast”)
+
+   “Cluster Shade”
+   :math:`= f_6 = \sum_{i, j}((i - \mu) + (j - \mu))^3 g(i, j)`
+
+   “Cluster Prominence”
+   :math:`= f_7 = \sum_{i, j}((i - \mu) + (j - \mu))^4 g(i, j)`
+
+   “Haralick’s Correlation”
+   :math:`= f_8 = \frac{\sum_{i, j}(i, j) g(i, j) -\mu_t^2}{\sigma_t^2}`
+   where :math:`\mu_t` and :math:`\sigma_t` are the mean and standard
+   deviation of the row (or column, due to symmetry) sums. Above,
+   :math:`\mu =` (weighted pixel average)
+   :math:`= \sum_{i, j}i \cdot g(i, j) = \sum_{i, j}j \cdot g(i, j)`
+   (due to matrix symmetry), and :math:`\sigma =` (weighted pixel
+   variance)
+   :math:`= \sum_{i, j}(i - \mu)^2 \cdot g(i, j) = \sum_{i, j}(j - \mu)^2 \cdot g(i, j)`
+   (due to matrix symmetry).
+
+-  ``-texture=advanced:`` In this case, 10 advanced texture features
+   will be processed. The 10 output image channels are: Mean, Variance,
+   Dissimilarity, Sum Average, Sum Variance, Sum Entropy, Difference of
+   Entropies, Difference of Variances, IC1 and IC2. They are provided in
+   this exact order in the output image. The textures are computed over
+   a sliding window with user defined radius.
+
+   To improve the speed of computation, a variant of the Grey Level
+   Co-occurrence Matrix (GLCM) called Grey Level Co-occurrence Indexed
+   List (GLCIL) is used. Given below is the mathematical explanation of
+   the computation of each texture. Each element of the GLCIL is a pair
+   made of a cell index (i, j), taken from the pixel pairs at the given
+   offset in the neighborhood window, and its frequency;
+   :math:`g(i, j)` is the frequency value of the pair with
+   index (i, j).
+
+   “Mean” :math:`= \sum_{i, j}i g(i, j)`
+
+   “Sum of squares: Variance”
+   :math:`= f_4 = \sum_{i, j}(i - \mu)^2 g(i, j)`
+
+   “Dissimilarity” :math:`= f_5 = \sum_{i, j}(i - j) g(i, j)^2`
+
+   “Sum average” :math:`= f_6 = -\sum_{i}i g_{x+y}(i)`
+
+   “Sum Variance” :math:`= f_7 = \sum_{i}(i - f_8)^2 g_{x+y}(i)`
+
+   “Sum Entropy” :math:`= f_8 = -\sum_{i}g_{x+y}(i) \log (g_{x+y}(i))`
+
+   “Difference variance” :math:`= f_{10} =` variance of :math:`g_{x-y}(i)`
+
+   “Difference entropy”
+   :math:`= f_{11} = -\sum_{i}g_{x-y}(i) \log (g_{x-y}(i))`
+
+   “Information Measures of Correlation IC1”
+   :math:`= f_{12} = \frac{f_9 - HXY1}{H}`
+
+   “Information Measures of Correlation IC2”
+   :math:`= f_{13} = \sqrt{1 - \exp\left(-2 |HXY2 - f_9|\right)}`
+
+   Above, :math:`\mu =` (weighted pixel average)
+   :math:`= \sum_{i, j}i \cdot g(i, j) =  \sum_{i, j}j \cdot g(i, j)`
+   (due to matrix symmetry), and
+
+   :math:`g_{x+y}(k) =  \sum_{i}\sum_{j}g(i, j)` where :math:`i+j=k`
+   and :math:`k = 2, 3, .., 2N_{g}` and
+
+   :math:`g_{x-y}(k) =  \sum_{i}\sum_{j}g(i, j)` where :math:`i-j=k`
+   and :math:`k = 0, 1, .., N_{g}-1`
+
+-  ``-texture=higher:`` In this case, 11 local higher order statistics
+   texture coefficients based on the grey level run-length matrix will
+   be processed. The 11 output image channels are: Short Run Emphasis,
+   Long Run Emphasis, Grey-Level Nonuniformity, Run Length
+   Nonuniformity, Run Percentage, Low Grey-Level Run Emphasis, High
+   Grey-Level Run Emphasis, Short Run Low Grey-Level Emphasis, Short Run
+   High Grey-Level Emphasis, Long Run Low Grey-Level Emphasis and Long
+   Run High Grey-Level Emphasis. They are provided in this exact order
+   in the output image. Thus, this application computes the following
+   Haralick textures over a sliding window with user defined radius:
+   (where :math:`p(i, j)` is the element in cell i, j of a normalized
+   Run Length Matrix, :math:`n_r` is the total number of runs and
+   :math:`n_p` is the total number of pixels):
+
+   “Short Run Emphasis”
+   :math:`= SRE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{j^2}`
+
+   “Long Run Emphasis”
+   :math:`= LRE = \frac{1}{n_r} \sum_{i, j}p(i, j) * j^2`
+
+   “Grey-Level Nonuniformity”
+   :math:`= GLN = \frac{1}{n_r} \sum_{i} \left( \sum_{j}{p(i, j)} \right)^2`
+
+   “Run Length Nonuniformity”
+   :math:`= RLN = \frac{1}{n_r} \sum_{j} \left( \sum_{i}{p(i, j)} \right)^2`
+
+   “Run Percentage” :math:`= RP = \frac{n_r}{n_p}`
+
+   “Low Grey-Level Run Emphasis”
+   :math:`= LGRE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{i^2}`
+
+   “High Grey-Level Run Emphasis”
+   :math:`= HGRE = \frac{1}{n_r} \sum_{i, j}p(i, j) * i^2`
+
+   “Short Run Low Grey-Level Emphasis”
+   :math:`= SRLGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j)}{i^2 j^2}`
+
+   “Short Run High Grey-Level Emphasis”
+   :math:`= SRHGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j) * i^2}{j^2}`
+
+   “Long Run Low Grey-Level Emphasis”
+   :math:`= LRLGE = \frac{1}{n_r} \sum_{i, j}\frac{p(i, j) * j^2}{i^2}`
+
+   “Long Run High Grey-Level Emphasis”
+   :math:`= LRHGE = \frac{1}{n_r} \sum_{i, j} p(i, j) i^2 j^2`
+
+The application can be used like this:
+
+::
+
+    otbcli_HaralickTextureExtraction  -in             InputImage
+                                      -channel        1
+                                      -texture        simple
+                                      -parameters.min 0
+                                      -parameters.max 255
+                                      -out            OutputImage
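+
+For users working from Python, a minimal sketch of the same computation is
+given below; it retrieves the texture bands directly as a numpy array,
+following the in-memory pattern of the Numpy recipe (the file name is a
+placeholder):
+
+::
+
+    import otbApplication as otb
+
+    haralick = otb.Registry.CreateApplication("HaralickTextureExtraction")
+    haralick.SetParameterString("in", "InputImage.tif")
+    haralick.SetParameterInt("channel", 1)
+    haralick.SetParameterString("texture", "simple")
+    haralick.SetParameterFloat("parameters.min", 0.0)
+    haralick.SetParameterFloat("parameters.max", 255.0)
+    haralick.Execute()  # in-memory execution, nothing written to disk yet
+
+    # 8 bands: Energy, Entropy, Correlation, Inverse Difference Moment,
+    # Inertia, Cluster Shade, Cluster Prominence, Haralick Correlation
+    textures = haralick.GetImageAsNumpyArray("out")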
+
+SFS texture extraction
+~~~~~~~~~~~~~~~~~~~~~~
+
+This application computes Structural Feature Set textures on every pixel
+in the selected channel of the input image. The output image is multi
+band with a feature per band. The 6 output texture features are
+SFS’Length, SFS’Width, SFS’PSI, SFS’W-Mean, SFS’Ratio and SFS’SD. They
+are provided in this exact order in the output image.
+
+It is based on line direction estimation, and is described in the
+following publication: Xin Huang, Liangpei Zhang and Pingxiang Li,
+Classification and Extraction of Spatial Features in Urban Areas Using
+High-Resolution Multispectral Imagery, IEEE Geoscience and Remote
+Sensing Letters, vol. 4, n. 2, 2007, pp. 260-264.
+
+The texture is computed for each pixel using its neighborhood. The user
+can set the spatial threshold, which is the maximum line length, and the
+spectral threshold, which is the maximum difference allowed between a
+pixel of the line and the center pixel of the current neighborhood. The
+adjustment constant alpha and the ratio Maximum Consideration Number,
+which describes the shape contour around the central pixel, are used to
+compute the :math:`w - mean` value.
+
+The *SFSTextureExtraction* application has the following input
+parameters:
+
+-  ``-in`` the input image to compute the features on
+
+-  ``-channel`` the selected channel index in the input image to be
+   processed (default value is 1)
+
+-  ``-parameters.spethre`` the spectral threshold (default value is 50)
+
+-  ``-parameters.spathre`` the spatial threshold (default value is 100
+   pixels)
+
+-  ``-parameters.nbdir`` the number of directions (default value is 20)
+
+-  ``-parameters.alpha`` the alpha value (default value is 1)
+
+-  ``-parameters.maxcons`` the ratio Maximum Consideration Number
+   (default value is 5)
+
+-  ``-out`` the output multi band image containing the selected texture
+   features (one feature per band)
+
+The application can be used like this:
+
+::
+
+    otbcli_SFSTextureExtraction -in             InputImage
+                                -channel        1
+                                -out            OutputImage
+
diff --git a/Documentation/Cookbook/rst/recipes/improc.rst b/Documentation/Cookbook/rst/recipes/improc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9a133b5deaa3da4fcd80edeedd7e88cecf52530b
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/improc.rst
@@ -0,0 +1,310 @@
+Image processing and information extraction
+===========================================
+
+Simple calculus with channels
+-----------------------------
+
+The *BandMath* application provides a simple and efficient way to
+perform band operations. The command line application and the
+corresponding Monteverdi module (shown in the BandMath module section)
+are based on the same standards. It computes a band-wise operation
+according to a user defined mathematical expression. The following code
+computes the absolute difference between the first bands of two images.
+
+::
+
+    otbcli_BandMath -il input_image_1 input_image_2
+                    -exp "abs(im1b1 - im2b1)"
+                    -out output_image
+
+The naming convention “im[x]b[y]” designates the y\ :sup:`th` band of the x\ :sup:`th` input image.
+
+The *BandMath* application embeds built-in operators and functions
+listed in `muparser documentation <http://muparser.sourceforge.net/mup_features.html#idDef2>`_ thus
+allowing a vast choice of possible operations.
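+
+The same operation can also be run from Python through the application
+wrapping (a minimal sketch; file names are placeholders):
+
+::
+
+    import otbApplication as otb
+
+    # Absolute difference between the first bands of two images
+    bandmath = otb.Registry.CreateApplication("BandMath")
+    bandmath.SetParameterStringList("il", ["input_image_1.tif",
+                                           "input_image_2.tif"])
+    bandmath.SetParameterString("exp", "abs(im1b1 - im2b1)")
+    bandmath.SetParameterString("out", "output_image.tif")
+    bandmath.ExecuteAndWriteOutput()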
+
+Segmentation
+------------
+
+Segmenting objects across a very high resolution scene and with a
+controlled quality is a difficult task for which no method has reached a
+sufficient level of performance to be considered as operational.
+
+Even if we leave aside the question of segmentation quality and consider
+that we have a method performing reasonably well on our data and objects
+of interest, the task of scaling up segmentation to real very high
+resolution data is itself challenging. First, we cannot load the whole
+data into memory, and there is a need for on-the-flow processing, which
+does not cope well with traditional segmentation algorithms. Second, the
+result of the segmentation process itself is difficult to represent and
+manipulate efficiently.
+
+The experience of segmenting large remote sensing images is packed into
+a single *Segmentation* application in **OTB Applications**.
+
+You can find more information about this application in this `blog <http://blog.orfeo-toolbox.org/preview/coming-next-large-scale-segmentation>`_ .
+
+Large-Scale Mean-Shift (LSMS) segmentation
+------------------------------------------
+
+LSMS is a segmentation workflow which allows to perform tile-wise
+segmentation of very large images with theoretical guarantees of getting
+results identical to those obtained without tiling. It has been developed
+by David Youssefi and Julien Michel during David's internship at CNES and
+is to be published soon.
+
+The workflow consists in chaining 3 or 4 dedicated applications and
+produces a GIS vector file with artifact-free polygons corresponding to
+the segmented image, as well as mean and variance of the radiometry of
+each band for each polygon.
+
+Step 1: Mean-Shift Smoothing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The first step of the workflow is to perform Mean-Shift smoothing with
+the *MeanShiftSmoothing* application:
+
+::
+
+    otbcli_MeanShiftSmoothing -in input_image 
+                              -fout filtered_range.tif 
+                              -foutpos filtered_spat.tif 
+                              -ranger 30 
+                              -spatialr 5 
+                              -maxiter 10 
+                              -modesearch 0
+
+Note that the *modesearch* option should be disabled, and that the
+*foutpos* parameter is optional: it can be activated if you want to
+perform the segmentation based on both spatial and range modes.
+
+This application will smooth large images by streaming them, and
+deactivating the *modesearch* will guarantee that the results will not
+depend on the streaming scheme. Please also note that the *maxiter* is
+used to set the margin to ensure these identical results, and as such
+increasing the *maxiter* may have an additional impact on processing
+time.
+
+Step 2: Segmentation
+~~~~~~~~~~~~~~~~~~~~
+
+The next step is to produce an initial segmentation based on the
+smoothed images produced by the *MeanShiftSmoothing* application. To do
+so, the *LSMSSegmentation* will process them by tiles whose dimensions
+are defined by the *tilesizex* and *tilesizey* parameters, and by
+writing intermediate images to disk, thus keeping the memory consumption
+very low throughout the process. The segmentation will group together
+adjacent pixels whose range distance is below the *ranger* parameter and
+(optionally) spatial distance is below the *spatialr* parameter.
+
+::
+
+    otbcli_LSMSSegmentation -in filtered_range.tif
+                            -inpos filtered_spat.tif
+                            -out  segmentation.tif uint32 
+                            -ranger 30 
+                            -spatialr 5 
+                            -minsize 0 
+                            -tilesizex 256 
+                            -tilesizey 256
+
+Note that the final segmentation image may contain a very large number
+of segments, and the *uint32* image type should therefore be used to
+ensure that there will be enough labels to index those segments. The
+*minsize* parameter will filter segments whose size in pixels is below
+its value, and their labels will be set to 0 (nodata).
+
+Please note that the output segmented image may look patchy, as if there
+were tiling artifacts: this is because segments are numbered
+sequentially with respect to the order in which tiles are processed. You
+will see after the result of the vectorization step that there are no
+artifacts in the results.
+
+The *LSMSSegmentation* application will write as many intermediate files
+as there are tiles during processing. As such, it may require twice as
+much free disk space as the size of the final image. The *cleanup* option
+(active by default) will clear the intermediate files during the
+processing as soon as they are not needed anymore. By default, files
+will be written to the current directory. The *tmpdir* option allows to
+specify a different directory for these intermediate files.
+
+Step 3 (optional): Merging small regions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *LSMSSegmentation* application allows to filter out small segments.
+In the output segmented image, those segments will be removed and
+replaced by the background label (0). Another solution to deal with the
+small regions is to merge them with the closest big enough adjacent
+region in terms of radiometry. This is handled by the
+*LSMSSmallRegionsMerging* application, which will output a segmented
+image where small regions have been merged. Again, the *uint32* image
+type is advised for this output image.
+
+::
+
+    otbcli_LSMSSmallRegionsMerging -in filtered_range.tif
+                                   -inseg segmentation.tif
+                                   -out segmentation_merged.tif uint32 
+                                   -minsize 10 
+                                   -tilesizex 256 
+                                   -tilesizey 256
+
+The *minsize* parameter allows to specify the threshold on the size of
+the regions to be merged. Like the *LSMSSegmentation* application, this
+application will process the input images tile-wise to keep resources
+usage low, with the guarantee of identical results. You can set the tile
+size using the *tilesizex* and *tilesizey* parameters. However unlike
+the *LSMSSegmentation* application, it does not require to write any
+temporary file to disk.
+
+Step 4: Vectorization
+~~~~~~~~~~~~~~~~~~~~~
+
+The last step of the LSMS workflow consists in the vectorization of the
+segmented image into a GIS vector file. This vector file will contain
+one polygon per segment, and each of these polygons will hold additional
+attributes denoting the label of the original segment, the size of the
+segment in pixels, and the mean and variance of each band over the
+segment. The projection of the output GIS vector file will be the same
+as the projection of the input image (if the input image has no
+projection, neither does the output GIS file).
+
+::
+
+    otbcli_LSMSVectorization -in input_image 
+                             -inseg segmentation_merged.tif 
+                             -out segmentation_merged.shp 
+                             -tilesizex 256 
+                             -tilesizey 256
+
+This application will process the input images tile-wise to keep
+resources usage low, with the guarantee of identical results. You can
+set the tile size using the *tilesizex* and *tilesizey* parameters.
+However unlike the *LSMSSegmentation* application, it does not require
+to write any temporary file to disk.
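+
+For convenience, the whole LSMS workflow can be chained from Python. The
+sketch below reuses the application names and parameter keys shown in the
+command lines above; the pixel-type setter and the way the boolean
+*modesearch* parameter is disabled are assumptions that may vary slightly
+between OTB versions, and file names are placeholders:
+
+::
+
+    import otbApplication as otb
+
+    # Step 1: Mean-Shift smoothing (modesearch disabled for streaming-safe results)
+    smoothing = otb.Registry.CreateApplication("MeanShiftSmoothing")
+    smoothing.SetParameterString("in", "input_image.tif")
+    smoothing.SetParameterString("fout", "filtered_range.tif")
+    smoothing.SetParameterString("foutpos", "filtered_spat.tif")
+    smoothing.SetParameterFloat("ranger", 30.0)
+    smoothing.SetParameterInt("spatialr", 5)
+    smoothing.SetParameterInt("maxiter", 10)
+    smoothing.SetParameterInt("modesearch", 0)
+    smoothing.ExecuteAndWriteOutput()
+
+    # Step 2: initial tile-wise segmentation, labels stored on 32 bits
+    segmentation = otb.Registry.CreateApplication("LSMSSegmentation")
+    segmentation.SetParameterString("in", "filtered_range.tif")
+    segmentation.SetParameterString("inpos", "filtered_spat.tif")
+    segmentation.SetParameterString("out", "segmentation.tif")
+    segmentation.SetParameterOutputImagePixelType("out", otb.ImagePixelType_uint32)
+    segmentation.SetParameterFloat("ranger", 30.0)
+    segmentation.SetParameterFloat("spatialr", 5.0)
+    segmentation.SetParameterInt("minsize", 0)
+    segmentation.SetParameterInt("tilesizex", 256)
+    segmentation.SetParameterInt("tilesizey", 256)
+    segmentation.ExecuteAndWriteOutput()
+
+    # Step 3 (optional): merge small regions with their closest radiometric neighbour
+    merging = otb.Registry.CreateApplication("LSMSSmallRegionsMerging")
+    merging.SetParameterString("in", "filtered_range.tif")
+    merging.SetParameterString("inseg", "segmentation.tif")
+    merging.SetParameterString("out", "segmentation_merged.tif")
+    merging.SetParameterOutputImagePixelType("out", otb.ImagePixelType_uint32)
+    merging.SetParameterInt("minsize", 10)
+    merging.SetParameterInt("tilesizex", 256)
+    merging.SetParameterInt("tilesizey", 256)
+    merging.ExecuteAndWriteOutput()
+
+    # Step 4: vectorization of the merged segmentation into a GIS vector file
+    vectorization = otb.Registry.CreateApplication("LSMSVectorization")
+    vectorization.SetParameterString("in", "input_image.tif")
+    vectorization.SetParameterString("inseg", "segmentation_merged.tif")
+    vectorization.SetParameterString("out", "segmentation_merged.shp")
+    vectorization.SetParameterInt("tilesizex", 256)
+    vectorization.SetParameterInt("tilesizey", 256)
+    vectorization.ExecuteAndWriteOutput()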
+
+Dempster Shafer based Classifier Fusion
+---------------------------------------
+
+This framework is dedicated to performing cartographic validation
+starting from the result of a detection (for example a road extraction),
+enhancing the reliability of the results by using a classifier fusion
+algorithm. Using a set of descriptors, the processing chain validates or
+invalidates the input geometrical features.
+
+Fuzzy Model (requisite)
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The *DSFuzzyModelEstimation* application performs the fuzzy model
+estimation (once per use case: descriptor set / Belief support /
+Plausibility support). It has the following input parameters:
+
+-  ``-psin`` a vector data of positive samples enriched according to the
+   “Compute Descriptors” part
+
+-  ``-nsin`` a vector data of negative samples enriched according to the
+   “Compute Descriptors” part
+
+-  ``-belsup`` a support for the Belief computation
+
+-  ``-plasup`` a support for the Plausibility computation
+
+-  ``-desclist`` an initialization model (xml file) or a descriptor name
+   list (listing the descriptors to be included in the model)
+
+The application can be used like this:
+
+::
+
+    otbcli_DSFuzzyModelEstimation -psin     PosSamples.shp
+                                  -nsin     NegSamples.shp
+                                  -belsup   "ROADSA"
+                                  -plasup   "NONDVI" "ROADSA" "NOBUIL"
+                                  -desclist "NONDVI" "ROADSA" "NOBUIL"
+                                  -out      FuzzyModel.xml
+
+The output file ``FuzzyModel.xml`` contains the optimal model to perform
+information fusion.
+
+First Step: Compute Descriptors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The first step in the classifier fusion based validation is to compute,
+for each studied polyline, the chosen descriptors. In this context, the
+*ComputePolylineFeatureFromImage* application can be used for a large
+range of descriptors. It has the following inputs:
+
+-  ``-in`` an image (of the studied scene) corresponding to the chosen
+   descriptor (NDVI, building Mask…)
+
+-  ``-vd`` a vector data containing polyline of interest
+
+-  ``-expr`` a formula (“b1 >0.4”, “b1 == 0”) where b1 is the standard
+   name of input image first band
+
+-  ``-field`` a field name corresponding to the descriptor codename
+   (NONDVI, ROADSA...)
+
+The output is a vector data containing polylines with a new field
+containing the descriptor value. In order to add the “NONDVI” descriptor
+to an input vector data (“inVD.shp”) corresponding to the percentage of
+pixels along a polyline that verifies the formula “NDVI >0.4” :
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   NDVI.TIF
+                                           -vd  inVD.shp
+                                           -expr  "b1 > 0.4"
+                                           -field "NONDVI"
+                                           -out   VD_NONDVI.shp
+
+``NDVI.TIF`` is the NDVI mono band image of the studied scene. This step
+must be repeated for each chosen descriptor:
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   roadSpectralAngle.TIF
+                                           -vd  VD_NONDVI.shp
+                                           -expr  "b1 > 0.24"
+                                           -field "ROADSA"
+                                           -out   VD_NONDVI_ROADSA.shp
+
+::
+
+    otbcli_ComputePolylineFeatureFromImage -in   Buildings.TIF
+                                           -vd  VD_NONDVI_ROADSA.shp
+                                           -expr  "b1 == 0"
+                                           -field "NOBUILDING"
+                                           -out   VD_NONDVI_ROADSA_NOBUIL.shp
+
+Both ``NDVI.TIF`` and ``roadSpectralAngle.TIF`` can be produced using
+**Monteverdi** feature extraction capabilities, and ``Buildings.TIF``
+can be generated using **Monteverdi** rasterization module. From now on,
+``VD_NONDVI_ROADSA_NOBUIL.shp`` contains three descriptor fields. It
+will be used in the following part.
+
+Second Step: Feature Validation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The final application (*VectorDataDSValidation*) will validate or
+invalidate the studied samples using `the Dempster-Shafer
+theory <http://en.wikipedia.org/wiki/Dempster%E2%80%93Shafer_theory>`_.
+Its inputs are:
+
+-  ``-in`` an enriched vector data “VD\_NONDVI\_ROADSA\_NOBUIL.shp”
+
+-  ``-belsup`` a support for the Belief computation
+
+-  ``-plasup`` a support for the Plausibility computation
+
+-  ``-descmod`` a fuzzy model FuzzyModel.xml
+
+The output is a vector data containing only the validated samples.
+
+::
+
+    otbcli_VectorDataDSValidation -in      extractedRoads_enriched.shp
+                                  -descmod FuzzyModel.xml
+                                  -out     validatedSamples.shp
+
diff --git a/Documentation/Cookbook/rst/recipes/numpy.rst b/Documentation/Cookbook/rst/recipes/numpy.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6e51f20d9ba77ce879163d11d093dc772c9c80d5
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/numpy.rst
@@ -0,0 +1,44 @@
+Numpy processing in OTB Applications
+====================================
+
+Providing input and output images to any OTB application in the form of numpy arrays is now possible with the OTB Python wrapping.
+The Python wrapping only exposes the OTB ApplicationEngine module, which allows access to existing C++ applications.
+Thanks to the loading mechanism of the ApplicationEngine, no specific wrapping is required for each application.
+
+The numpy extension to the Python wrapping allows data to be exchanged with an application as an array rather than as a file on disk.
+Of course, it is still possible to load an image from a file and then convert it to a numpy array, or simply to provide a file as before via
+Application.SetParameterString(...).
+
+This bridge between numpy and OTB makes it easy to plug OTB into any image processing chain via Python code that uses
+GIS/image processing tools such as GDAL, GRASS GIS or OSSIM that can deal with numpy arrays.
+
+
+The code below reads an input image using Python Pillow (PIL) and converts it to a numpy array. This numpy array is
+used as input to the application via the *SetImageFromNumpyArray(...)* method.
+The application used in this example is `ExtractROI <../Applications/app_ExtractROI.html>`_. After extracting
+a small area, the output image is retrieved as a numpy array with the *GetImageAsNumpyArray(...)* method.
+
+::
+
+   import numpy as np
+   import otbApplication
+   import matplotlib.pyplot as plt
+   from PIL import Image as PILImage
+
+   # Read the input image with Pillow and convert it to a numpy array
+   pilimage = PILImage.open('poupees.jpg')
+   npimage = np.asarray(pilimage)
+   plt.imshow(pilimage)
+   plt.show()
+
+   # Feed the numpy array to the ExtractROI application
+   ExtractROI = otbApplication.Registry.CreateApplication('ExtractROI')
+   ExtractROI.SetImageFromNumpyArray('in', npimage)
+   ExtractROI.SetParameterInt('startx', 140)
+   ExtractROI.SetParameterInt('starty', 120)
+   ExtractROI.SetParameterInt('sizex', 150)
+   ExtractROI.SetParameterInt('sizey', 150)
+   ExtractROI.Execute()
+
+   # Retrieve the extracted area as a numpy array and display it
+   ExtractOutput = ExtractROI.GetImageAsNumpyArray('out')
+   output_pil_image = PILImage.fromarray(np.uint8(ExtractOutput))
+   plt.imshow(output_pil_image)
+   plt.show()
+
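+Continuing the previous snippet, the extracted array can be processed
+further with numpy and then handed back to another OTB application to be
+written as a regular image file; the *Rescale* application and its
+``outmin``/``outmax`` parameter keys are assumptions here, and the output
+file name is a placeholder:
+
+::
+
+   # Simple numpy post-processing of the extracted area
+   stretched = np.clip(ExtractOutput * 1.2, 0, 255)
+
+   # Hand the array back to OTB and write it to disk
+   Rescale = otbApplication.Registry.CreateApplication('Rescale')
+   Rescale.SetImageFromNumpyArray('in', stretched)
+   Rescale.SetParameterFloat('outmin', 0.0)
+   Rescale.SetParameterFloat('outmax', 255.0)
+   Rescale.SetParameterString('out', 'poupees_extract_rescaled.tif')
+   Rescale.ExecuteAndWriteOutput()
+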
diff --git a/Documentation/Cookbook/rst/recipes/optpreproc.rst b/Documentation/Cookbook/rst/recipes/optpreproc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..af48ae8d5183d0bb62060c95cf1b7d1b10e1357d
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/optpreproc.rst
@@ -0,0 +1,546 @@
+From raw image to calibrated product
+====================================
+
+This section presents various pre-processing tasks that are presented in
+a classical order to obtain a calibrated, pan-sharpened image.
+
+Optical radiometric calibration
+-------------------------------
+
+In remote sensing imagery, pixel values are called DN (for Digital
+Numbers) and cannot be physically interpreted and compared: they are
+influenced by various factors such as the amount of light flowing through
+the sensor, the gain of the detectors and the analog-to-digital
+converter.
+
+Depending on the season, the light and atmospheric conditions, the
+position of the sun or the sensor internal parameters, these DN can
+drastically change for a given pixel (apart from any ground change
+effects). Moreover, these effects are not uniform over the spectrum: for
+instance aerosol amount and type has usually more impact on the blue
+channel.
+
+Therefore, it is necessary to calibrate the pixel values before any
+physical interpretation is made out of them. In particular, this
+processing is mandatory before any comparison of pixel spectrum between
+several images (from the same sensor), and to train a classifier without
+dependence to the atmospheric conditions at the acquisition time.
+
+Calibrated values are called surface reflectivity, which is a ratio
+denoting the fraction of light that is reflected by the underlying
+surface in the given spectral range. As such, its values lie in the
+range :math:`[0,1]`. For convenience, images are often stored in
+thousandths of reflectivity, so that they can be encoded with an integer
+type. Two levels of calibration are usually distinguished:
+
+-  The first level is called *Top Of Atmosphere (TOA)* reflectivity. It
+   takes into account the sensor gain, sensor spectral response and the
+   solar illumination.
+
+-  The second level is called *Top Of Canopy (TOC)* reflectivity. In
+   addition to sensor gain and solar illumination, it takes into account
+   the optical thickness of the atmosphere, the atmospheric pressure,
+   the water vapor amount, the ozone amount, as well as the composition
+   and amount of aerosol gases.
+
+This transformation can be done either with **OTB Applications** or with
+**Monteverdi** . Sensor-related parameters such as gain, date, spectral
+sensitivity and sensor position are seamlessly read from the image
+metadata. Atmospheric parameters can be tuned by the user. Supported
+sensors are :
+
+-  Pleiades
+
+-  SPOT5
+
+-  QuickBird
+
+-  Ikonos
+
+-  WorldView-1
+
+-  WorldView-2
+
+-  Formosat
+
+Optical calibration with **OTB Applications** 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *OpticalCalibration* application allows to perform optical
+calibration. The mandatory parameters are the input and output images.
+All other parameters are optional. By default the level of calibration
+is set to TOA (Top Of Atmosphere). The output images are expressed in
+thousandth of reflectivity using a 16 bits unsigned integer type.
+
+A basic TOA calibration task can be performed with the following command:
+
+::
+
+    otbcli_OpticalCalibration -in  input_image -out output_image
+
+A basic TOC calibration task can be performed with the following command:
+
+::
+
+    otbcli_OpticalCalibration -in  input_image -out output_image -level toc
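+
+The same calibrations can be scripted through the Python wrapping; a
+minimal sketch (file names are placeholders) is given below:
+
+::
+
+    import otbApplication as otb
+
+    calibration = otb.Registry.CreateApplication("OpticalCalibration")
+    calibration.SetParameterString("in", "input_image.tif")
+    calibration.SetParameterString("out", "output_image.tif")
+    calibration.SetParameterString("level", "toc")  # "toa" is the default level
+    calibration.ExecuteAndWriteOutput()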
+
+Optical calibration with **Monteverdi** 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+These transformations can also be done in **Monteverdi** .
+
+The 6S model needs atmospheric parameters to be able to compute
+radiative terms to estimate the atmospheric contributions on the input
+signal. Default parameters are available in the module. For atmospheric
+parameters, it is possible to indicate AERONET file. The AERONET
+(AErosol RObotic NETwork) program is a federation of ground-based remote
+sensing aerosol networks established by NASA and PHOTONS (Univ. of Lille
+1, CNES, and CNRS-INSU) and is greatly expanded by collaborators from
+national agencies, institutes, universities, individual scientists, and
+partners. The program provides accessible public domain database of
+aerosol optical, mircrophysical and radiative properties.
+
+The module produces four outputs:
+
+-  Luminance image.
+
+-  TOA reflectance image.
+
+-  TOC reflectance image.
+
+-  Difference TOA-TOC image, which allows to get the estimation of
+   atmospheric contribution.
+
+.. figure:: ../Art/MonteverdiImages/monteverdi_optical_calibration.png
+
+   Figure 1 : Optical calibration module.
+
+.. figure:: ../Art/MonteverdiImages/monteverdi_optical_calibration_outputs.png
+
+   Figure 2 : Optical calibration module’s outputs.
+
+Pan-sharpening
+--------------
+
+Because of physical constraints on the sensor design, it is difficult to
+achieve high spatial and spectral resolution at the same time: a better
+spatial resolution means a smaller detector, which in turn means less
+optical flow on the detector surface. On the contrary, spectral bands
+are obtained through filters applied on the detector surface, which
+lower the optical flow, so that it is necessary to increase the
+detector size to achieve an acceptable signal-to-noise ratio.
+
+For these reasons, many high resolution satellite payloads are composed
+of two sets of detectors, which in turn deliver two different kinds of
+images:
+
+-  The multi-spectral (XS) image, composed of 3 to 8 spectral bands
+   containing usually blue, green, red and near infra-red bands at a
+   given resolution (usually from 2.8 meters to 2 meters).
+
+-  The panchromatic (PAN) image, which is a grayscale image acquired by
+   a detector covering a wider part of the light spectrum, which allows
+   to increase the optical flow and thus to reduce pixel size.
+   Therefore, resolution of the panchromatic image is usually around 4
+   times lower than the resolution of the multi-spectral image (from 46
+   centimeters to 70 centimeters).
+
+It is very frequent that those two images are delivered side by side by
+data providers. Such a dataset is called a bundle. A very common remote
+sensing processing is to fuse the panchromatic image with the
+multi-spectral one so as to get an image combining the spatial
+resolution of the panchromatic image with the spectral richness of the
+multi-spectral image. This operation is called pan-sharpening.
+
+This fusion operation requires two different steps :
+
+#. The multi-spectral (XS) image is zoomed and registered to the
+   panchromatic image,
+
+#. A pixel-by-pixel fusion operator is applied to the co-registered
+   pixels of the multi-spectral and panchromatic image to obtain the
+   fused pixels.
+
+Using either **OTB Applications** or modules from **Monteverdi** , it is
+possible to perform both steps in a row, or step-by-step fusion, as
+described in the above sections.
+
+Pan-sharpening with **OTB Applications** 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *BundleToPerfectSensor* application allows to perform both steps in
+a row. Seamless sensor modelling is used to perform zooming and
+registration of the multi-spectral image on the panchromatic image. In
+the case of a Pléiades bundle, a different approach is used : an affine
+transform is used to zoom the multi-spectral image and apply a residual
+translation. This translation is computed based on metadata about the
+geometric processing of the bundle. This zooming and registration of the
+multi-spectral image over the panchromatic image can also be performed
+by the *Superimpose* application.
+
+After the registration step, a simple pan-sharpening is applied,
+according to the following formula:
+
+.. math:: PXS(i,j) = \frac{PAN(i,j)}{PAN_{smooth}(i,j)} \cdot XS(i,j)
+
+Where :math:`i` and :math:`j` are pixels indices, :math:`PAN` is the
+panchromatic image, :math:`XS` is the multi-spectral image and
+:math:`PAN_{smooth}` is the panchromatic image smoothed with a kernel to
+fit the multi-spectral image scale.
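+
+As an illustration of this formula only (not of the application
+internals), the small numpy sketch below applies it to arrays that are
+assumed to be already superimposed on the same grid, using a simple box
+kernel as the smoothing; scipy is an extra dependency of this sketch:
+
+::
+
+    import numpy as np
+    from scipy.ndimage import uniform_filter
+
+    def rcs_pansharpen(pan, xs, radius=3):
+        """pan: (rows, cols) array; xs: (rows, cols, bands) array on the same grid."""
+        pan = pan.astype(np.float64)
+        # Smooth the panchromatic band down to the multi-spectral scale
+        pan_smooth = uniform_filter(pan, size=2 * radius + 1)
+        ratio = pan / np.maximum(pan_smooth, 1e-6)  # avoid division by zero
+        return xs.astype(np.float64) * ratio[..., np.newaxis]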
+
+Here is a simple example of how to use the *BundleToPerfectSensor*
+application:
+
+::
+
+    otbcli_BundleToPerfectSensor -inp pan_image -inxs xs_image -out output_image
+
+There are also optional parameters that can be useful for this tool:
+
+-  The ``-elev`` option allows to specify the elevation, either with a
+   DEM formatted for OTB (``-elev.dem`` option, see section [ssec:dem])
+   or with an average elevation (``-elev.default`` option). Since
+   registration and zooming of the multi-spectral image is performed
+   using sensor-models, it may happen that the registration is not
+   perfect in case of landscape with high elevation variation. Using a
+   DEM in this case allows to get better registration.
+
+-  The ``-lmSpacing`` option allows to specify the step of the
+   registration grid between the multi-spectral image and panchromatic
+   image. This is expressed in amount of panchromatic pixels. A lower
+   value gives a more precise registration but implies more computation
+   with the sensor models, and thus increase the computation time.
+   Default value is 10 pixels, which gives sufficient precision in most
+   of the cases.
+
+-  The ``-mode`` option allows to select the registration mode for the
+   multi-spectral image. The ``default`` mode uses the sensor model of
+   each image to create a generic “MS to Pan” transform. The ``phr``
+   mode uses a simple affine transform (which doesn’t need an elevation
+   source nor a registration grid).
+
+Pan-sharpening is a quite heavy processing requiring a lot of system
+resources. The ``-ram`` option allows you to limit the amount of memory
+available for the computation, and to avoid overloading your computer.
+Increasing the available amount of RAM may also result in better
+computation time, since it optimises the use of the system resources.
+Default value is 256 MB.
+
+Pan-sharpening with **Monteverdi** 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**Monteverdi** allows to perform step-by-step fusion. The following
+screenshots highlight the operations needed to perform pan-sharpening.
+
+-  Open panchromatic and multispectral images in monteverdi using the
+   *Open Dataset* module or using the ``-il`` option of the
+   **Monteverdi** executable.
+
+-  The *Superimpose* module is used to zoom and register the
+   multispectral image on the panchromatic image. As a result, we get a
+   multispectral dataset with the same geographic extent and the same
+   resolution as the panchromatic image (cf Figure 4).
+
+.. figure:: ../Art/MonteverdiImages/monteverdi_QB_PAN_ROI.png
+
+
+
+.. figure:: ../Art/MonteverdiImages/monteverdi_QB_MUL_Superimpose.png
+
+   Figure 4: Panchromatic image and zoomed, registered multispectral image.
+
+
+-  Now the *Simple RCS pan-sharpening* module can be applied, using the
+   panchromatic and the multispectral images as inputs. It produces a
+   multispectral image with the same resolution and geographic extent
+   (cf. `Figure 5`).
+
+.. figure:: ../Art/MonteverdiImages/monteverdi_QB_XS_pan-sharpened.png
+
+   Figure 5: Pan-sharpened image using the simple RCS module.
+
+Please also note that since registration and zooming of the
+multi-spectral image with the panchromatic image relies on sensor
+modelling, this tool will work only for images whose sensor model is
+available in **Orfeo Toolbox** (see :ref:`section3` for a detailed
+list). It will also work with ortho-ready products in cartographic
+projection.
+
+.. _section2:
+
+Digital Elevation Model management
+----------------------------------
+
+A Digital Elevation Model (DEM) is a georeferenced image (or collection
+of images) where each pixel corresponds to a local elevation. DEMs are
+useful for tasks involving sensor-to-ground and ground-to-sensor
+coordinate transforms, for instance during ortho-rectification (see
+:ref:`section3`). These transforms need to find the intersection
+between the line of sight of the sensor and the Earth geoid. If a
+simple spheroid is used as the Earth model, potentially large
+localisation errors can occur in areas where the elevation is high or
+varies strongly. Of course, DEM accuracy and resolution have a great
+impact on the precision of these transforms.
+
+Two freely available DEMs with worldwide coverage are commonly used,
+both delivered as 1 degree by 1 degree tiles:
+
+-  `The Shuttle Radar Topography Mission
+   (SRTM) <http://www2.jpl.nasa.gov/srtm/>`_  is a 90 meter resolution
+   DEM, obtained by radar interferometry during a campaign of NASA's
+   Endeavour space shuttle in 2000.
+
+-  The `Advanced Spaceborne Thermal Emission and Reflection Radiometer
+   (ASTER) <http://www.ersdac.or.jp/GDEM/E/2.html>`_  is a 30 meter
+   resolution DEM obtained by stereoscopic processing of the archive of
+   the ASTER instrument.
+
+The **Orfeo Toolbox** relies on `OSSIM <http://www.ossim.org/>`_ 
+capabilities for sensor modelling and DEM handling. All tiles of a
+given DEM are expected to be located within a single directory. General
+elevation support is also provided for GeoTIFF files.
+
+Whenever an application or **Monteverdi** module requires a DEM, the
+**elev.dem** option sets the DEM directory. This directory must contain
+the DEM tiles, either in DTED or SRTM format, or as GeoTIFF files.
+Subdirectories are not supported.
+
+Depending on the elevation reference, you may also need a geoid to
+manage elevation accurately. For this, you need to specify the path to
+a file which contains the geoid. The `geoid <http://en.wikipedia.org/wiki/Geoid>`_ 
+corresponds to the equipotential surface that would coincide with the
+mean ocean surface of the Earth.
+
+We provide one geoid in the `OTB-Data <http://hg.orfeo-toolbox.org/OTB-Data/file/4722d9e672c6/Input/DEM/egm96.grd>`_ repository.
+
+In all applications, the **elev.geoid** option sets the path to the
+geoid. Finally, it is also possible to use an average elevation when no
+DEM is available, by using the **elev.default** option.
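+
+As a sketch of how these options fit together, the command below passes
+both a DEM directory and a geoid file to the *OrthoRectification*
+application (described in :ref:`section3`); ``dem_dir``, ``egm96.grd``
+and the image names are placeholders to be replaced with your own
+files.
+
+::
+
+    otbcli_OrthoRectification -io.in      input_image
+                              -io.out     output_image
+                              -elev.dem   dem_dir
+                              -elev.geoid egm96.grd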
+
+
+.. _section3:
+
+Ortho-rectification and map projections
+---------------------------------------
+
+There are several levels of products available on the remote sensing
+imagery market. The most basic level is often provided in the geometry
+of acquisition (sometimes called the raw geometry). In this case, pixel
+coordinates can not be used directly as geographical positions. For
+most sensors (but not all), the different lines correspond to different
+acquisition times and thus different sensor positions, while different
+columns correspond to different cells of the detector.
+
+The mapping of a raw image so as to register it to a cartographic grid
+is called ortho-rectification, and consists of inverting at least the
+following effects:
+
+-  In most cases, lines are orthogonal to the sensor trajectory, which
+   does not exactly (and in some cases not at all) follow a north-south
+   axis,
+
+-  Depending on the sensor, the line of sight may differ from nadir
+   (the ground position directly below the sensor), and thus a
+   projective warping may appear,
+
+-  The variation of height in the landscape may result in severe
+   warping of the image.
+
+Moreover, depending on the area of the world where the image has been
+acquired, different map projections should be used.
+
+The ortho-rectification process is as follows: once an appropriate map
+projection has been defined, a localisation grid is computed to map
+pixels from the raw image to the ortho-rectified one. Pixels from the
+raw image are then interpolated according to this grid in order to fill
+the ortho-rectified pixels.
+
+Ortho-rectification can be performed either with **OTB Applications** or
+**Monteverdi** . Sensor parameters and image meta-data are seamlessly
+read from the image files without needing any user interaction, provided
+that all auxiliary files are available. The sensors for which **Orfeo
+Toolbox** supports ortho-rectification of raw products are the
+following:
+
+-  Pleiades
+
+-  SPOT5
+
+-  Ikonos
+
+-  Quickbird
+
+-  GeoEye
+
+-  WorldView
+
+In addition, GeoTIFF and other file formats with geographical
+information are seamlessly read by **Orfeo Toolbox** , and the
+ortho-rectification tools can be used to re-sample these images into
+another map projection.
+
+Beware of “ortho-ready” products
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are some image products, called “ortho-ready”, that should be
+processed carefully. They are actually products in raw geometry, but
+their metadata also contains projection data:
+
+-  a map projection
+
+-  a physical origin
+
+-  a physical spacing
+
+-  and sometimes an orientation angle
+
+The purpose of this projection information is to give an approximate map
+projection to a raw product. It allows you to display the raw image in a
+GIS viewer at the (almost) right location, without having to reproject
+it. Obviously, this map projection is not as accurate as the sensor
+parameters of the raw geometry. In addition, the impact of the elevation
+model can’t be observed if the map projection is used. In order to
+perform an ortho-rectification on this type of product, the map
+projection has to be hidden from **Orfeo Toolbox** .
+
+You can check whether a product is an “ortho-ready” product by using
+tools such as ``gdalinfo`` or `ReadImageInfo <../Applications/app_ReadImageInfo.html>`_,
+and by verifying that the product fulfils the two following conditions:
+
+-  The product is in raw geometry: you should expect the presence of
+   RPC coefficients and a non-empty OSSIM keywordlist.
+
+-  The product has a map projection: you should see a projection name
+   with a physical origin and spacing.
+
+In that case, you can hide the map projection from the **Orfeo Toolbox**
+by using *extended* filenames. Instead of using the plain input image
+path, you append a specific key at the end:
+
+::
+
+    "path_to_image?&skipcarto=true"
+
+The double quotes may be necessary for a successful parsing. More
+details about extended filenames can be found on the `wiki page <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`_ , and
+also in the `OTB Software Guide <http://orfeo-toolbox.org/SoftwareGuide>`_ .
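+
+As a sketch of how this key fits into an actual ortho-rectification
+call (the application itself is detailed in the next section), the
+``skipcarto`` option can be appended directly to the input image path;
+the file names below are placeholders.
+
+::
+
+    otbcli_OrthoRectification -io.in  "ortho_ready_image.tif?&skipcarto=true"
+                              -io.out output_image.tif
+                              -elev.dem dem_dir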
+
+Ortho-rectification with **OTB Applications** 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *OrthoRectification* application performs ortho-rectification and
+map re-projection. The simplest way to use it is the following command:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image -io.out output_image
+
+In this case, the tool will automatically estimate all the necessary
+parameters:
+
+-  The map projection is set to UTM (a worldwide map projection) and the
+   UTM zone is automatically estimated,
+
+-  The ground sampling distance of the output image is computed to fit
+   the image resolution,
+
+-  The region of interest (upper-left corner and size of the image) is
+   estimated so as to contain the whole input image extent.
+
+In order to use a Digital Elevation Model (see :ref:`section2`) for
+better localisation performance, one can pass the directory containing
+the DEM tiles to the application:
+
+::
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+
+If one wants to use a different map projection, the ``-map`` option may be
+used (example with *lambert93* map projection):
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+
+Map projections handled by the application are the following (please
+note that the ellipsoid is always WGS84):
+
+-  | UTM: ``-map utm``
+   | The UTM zone and hemisphere can be set by the options ``-map.utm.zone`` and ``-map.utm.northhem``.
+
+-  Lambert 2 etendu: ``-map lambert2``
+
+-  Lambert 93: ``-map lambert93``
+
+-  | TransMercator: ``-map transmercator``
+   | The related parameters (false easting, false northing and scale factor) can be set by the options ``-map.transmercator.falseeasting``, ``-map.transmercator.falsenorthing`` and ``-map.transmercator.scale``.
+
+-  WGS: ``-map wgs``
+
+-  | Any map projection system with an EPSG code: ``-map epsg``
+   | The EPSG code is set with the option ``-map.epsg.code`` (see the example after this list).
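+
+For instance, a sketch of re-projecting into a projection identified by
+its EPSG code would be the following (32631, the WGS84 / UTM zone 31N
+code, is used here purely as an illustrative value):
+
+::
+
+    otbcli_OrthoRectification -io.in         input_image
+                              -io.out        output_image
+                              -elev.dem      dem_dir
+                              -map           epsg
+                              -map.epsg.code 32631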
+
+The group ``outputs`` contains parameters to set the origin, size and
+spacing of the output image. For instance, the ground spacing can be
+specified as follows:
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+                              -outputs.spacingx spx
+                              -outputs.spacingy spy
+
+Please note that since the y axis of the image points downwards, the y
+spacing should be negative to avoid swapping the north and south
+directions.
+
+A user-defined region of interest to ortho-rectify can be specified as
+follows:
+
+::
+
+
+    otbcli_OrthoRectification -io.in input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -map lambert93
+                              -outputs.spacingx spx
+                              -outputs.spacingy spy
+                              -outputs.ulx ul_x_coord
+                              -outputs.uly ul_y_coord
+                              -outputs.sizex x_size
+                              -outputs.sizey y_size
+
+Here the ``-outputs.ulx`` and ``-outputs.uly`` options specify the
+coordinates of the upper-left corner of the output image, while the
+``-outputs.sizex`` and ``-outputs.sizey`` options specify the size of
+the output image.
+
+A few more interesting options are available (some of them are combined
+in the sketch after this list):
+
+-  The ``-opt.rpc`` option uses an estimated RPC model instead of the
+   rigorous SPOT5 model, which speeds up the processing,
+
+-  The ``-opt.gridspacing`` option defines the spacing of the
+   localisation grid used for ortho-rectification. A coarser grid
+   speeds up the processing, but with a potential loss of accuracy. A
+   standard value would be 10 times the ground spacing of the output
+   image.
+
+-  The ``-interpolator`` option selects the interpolation algorithm
+   among nearest neighbor, linear and bicubic. The default is nearest
+   neighbor interpolation, but bicubic should be fine in most cases.
+
+-  The ``-opt.ram`` option specifies the amount of memory available for
+   the processing (in MB). The default is 256 MB. Increasing this value
+   to fit the available memory on your computer might speed up the
+   processing.
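+
+As a hedged sketch combining some of these options (the grid spacing
+value assumes an output ground spacing of 4 meters, so that 40 is ten
+times the ground spacing; adjust the values to your own data):
+
+::
+
+    otbcli_OrthoRectification -io.in  input_image
+                              -io.out output_image
+                              -elev.dem dem_dir
+                              -opt.gridspacing 40
+                              -opt.ram 1024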
+
+
+
diff --git a/Documentation/Cookbook/rst/recipes/pbclassif.rst b/Documentation/Cookbook/rst/recipes/pbclassif.rst
new file mode 100644
index 0000000000000000000000000000000000000000..43a6524c9d867b653878c6b34a3cca74dc7c32fe
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/pbclassif.rst
@@ -0,0 +1,475 @@
+Classification
+==============
+
+.. _section4:
+
+Pixel based classification
+--------------------------
+
+The classification in the application framework provides a supervised
+pixel-wise classification chain based on learning from multiple images,
+using a specified machine learning method such as SVM, Bayes, KNN,
+Random Forests or Artificial Neural Networks (see the application help
+of *TrainImagesClassifier* for further details about all the available
+classifiers). It supports huge images through streaming and
+multi-threading. The classification chain performs a training step
+using the intensities of each pixel as features. Please note that all
+the input images must have the same number of bands to be comparable.
+
+Statistics estimation
+~~~~~~~~~~~~~~~~~~~~~
+
+In order to make these features comparable between the training images,
+the first step consists of estimating the input image statistics. These
+statistics will be used to center and reduce the intensities (mean of 0
+and standard deviation of 1) of the samples selected from the vector
+data produced by the user. To do so, the *ComputeImagesStatistics* tool
+can be used:
+
+::
+
+    otbcli_ComputeImagesStatistics -il  im1.tif im2.tif im3.tif
+                                   -out images_statistics.xml
+
+This tool will compute the mean of each band and the standard deviation
+based on the pooled variance of each band, and finally export them to
+an XML file. This feature statistics XML file will be an input of the
+following tools.
+
+Building the training data set
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As the chain is supervised, we first need to build a training set with
+positive examples of the different objects of interest. This can be
+done with the **Monteverdi** Vectorization module (`Figure 1`). These
+polygons must be saved in an OGR vector format supported by GDAL, such
+as an ESRI shapefile.
+
+This operation must be repeated for each image used as input of the
+training function.
+
+Please note that the positive examples in the vector data should have a
+*Class* field with a label value higher than 1 and consistent across
+all images.
+
+.. figure::  ../Art/MonteverdiImages/monteverdi_vectorization_module_for_classification.png
+
+Figure 1: A training data set built with the Monteverdi vectorization module.
+
+You can generate the vector data set with the `Quantum GIS <http://www.qgis.org/>`_ 
+software, for example, and save it in an OGR vector format supported by `GDAL <http://www.gdal.org/>`_ 
+(an ESRI shapefile for example). **OTB Applications** should be able to
+transform the vector data into the image coordinate system.
+
+Performing the learning scheme
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once images statistics have been estimated, the learning scheme is the
+following:
+
+#. For each input image:
+
+   #. Read the region of interest (ROI) inside the shapefile,
+
+   #. Generate validation and training data within the ROI,
+
+   #. Add vectors respectively to the training samples set and the
+      validation samples set.
+
+#. Increase the size of the training samples set and balance it by
+   generating new noisy samples from the previous ones,
+
+#. Perform the learning with this training set
+
+#. Estimate performances of the classifier on the validation samples set
+   (confusion matrix, precision, recall and F-Score).
+
+Let us consider a SVM classification. These steps can be performed by
+the *TrainImagesClassifier* command-line using the following:
+
+::
+
+    otbcli_TrainImagesClassifier -io.il      im1.tif im2.tif im3.tif
+                                 -io.vd      vd1.shp vd2.shp vd3.shp
+                                 -io.imstat  images_statistics.xml
+                                 -classifier svm (classifier_for_the_training)
+                                 -io.out     model.svm
+
+Additional groups of parameters are also available (see the application
+help for more details; a sketch combining some of them follows this
+list):
+
+-  ``-elev`` Handling of elevation (DEM or average elevation)
+
+-  ``-sample`` Group of parameters for sampling
+
+-  ``-classifier`` Classifiers to use for the training, and their
+   corresponding groups of parameters
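+
+A hedged sketch of a call using some of these groups is given below.
+The ``-sample.mt`` (maximum training sample size per class) and
+``-sample.vtr`` (training/validation ratio) parameter names are
+assumptions to be checked against the application help.
+
+::
+
+    otbcli_TrainImagesClassifier -io.il      im1.tif im2.tif im3.tif
+                                 -io.vd      vd1.shp vd2.shp vd3.shp
+                                 -io.imstat  images_statistics.xml
+                                 -elev.dem   dem_dir
+                                 -sample.mt  1000
+                                 -sample.vtr 0.5
+                                 -classifier svm
+                                 -io.out     model.svm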
+
+Using the classification model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once the classifier has been trained, one can apply the model to
+classify the pixels of a new image into the defined classes using the
+*ImageClassifier* application:
+
+::
+
+    otbcli_ImageClassifier -in     image.tif
+                           -imstat images_statistics.xml
+                           -model  model.svm
+                           -out    labeled_image.tif
+
+You can set an input mask to limit the classification to the areas
+where the mask value is greater than 0.
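+
+As a sketch, assuming the mask is exposed through a ``-mask`` parameter
+(please check the application help), this would look like:
+
+::
+
+    otbcli_ImageClassifier -in     image.tif
+                           -mask   mask.tif
+                           -imstat images_statistics.xml
+                           -model  model.svm
+                           -out    labeled_image.tif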
+
+Validating the classification model
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The performance of the model generated by the *TrainImagesClassifier*
+application is directly estimated by the application itself, which
+displays the precision, recall and F-score of each class, and can
+generate the global confusion matrix as an output \*.CSV file.
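+
+As a sketch, assuming the confusion matrix output is exposed through an
+``-io.confmatout`` parameter (please check the application help), the
+training call would become:
+
+::
+
+    otbcli_TrainImagesClassifier -io.il         im1.tif im2.tif im3.tif
+                                 -io.vd         vd1.shp vd2.shp vd3.shp
+                                 -io.imstat     images_statistics.xml
+                                 -classifier    svm
+                                 -io.out        model.svm
+                                 -io.confmatout confusion_matrix.csv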
+
+With the *ComputeConfusionMatrix* application, it is also possible to
+estimate the performance of a model from a classification map generated
+with the *ImageClassifier* application. This labeled image is compared
+to positive reference samples (either represented as a raster labeled
+image or as vector data containing the reference classes). It will also
+compute the confusion matrix and the precision, recall and F-score of
+each class, based on the
+`ConfusionMatrixCalculator <http://www.orfeo-toolbox.org/doxygen-current/classotb_1_1ConfusionMatrixCalculator.html>`_ 
+class.
+
+::
+
+    otbcli_ComputeConfusionMatrix -in                labeled_image.tif
+                                  -ref               vector
+                                  -ref.vector.in     vectordata.shp
+                                  -ref.vector.field  Class (name_of_label_field)
+                                  -out               confusion_matrix.csv
+
+Fancy classification results
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Color mapping can be used to apply color transformations to the final
+gray-level label image. It produces an RGB classification map by
+re-mapping the image values to make them suitable for display purposes.
+One can use the *ColorMapping* application. This tool will replace each
+label with an 8-bit RGB color specified in a mapping file. The mapping
+file should look like this:
+
+::
+
+    # Lines beginning with a # are ignored
+    1 255 0 0
+
+In the previous example, 1 is the label and 255 0 0 is an RGB color
+(this one will be rendered as red). To use the mapping tool, enter the
+following:
+
+::
+
+    otbcli_ColorMapping -in                labeled_image.tif
+                        -method            custom
+                        -method.custom.lut lut_mapping_file.txt
+                        -out               RGB_color_image.tif
+
+Other look-up tables (LUT) are available: standard continuous LUT,
+optimal LUT, and LUT computed over a support image.
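+
+As a hedged sketch of the continuous mode, assuming the
+``-method continuous`` key and its ``-method.continuous.lut``,
+``-method.continuous.min`` and ``-method.continuous.max``
+sub-parameters (to be checked against the application help):
+
+::
+
+    otbcli_ColorMapping -in                    labeled_image.tif
+                        -method                continuous
+                        -method.continuous.lut hot
+                        -method.continuous.min 1
+                        -method.continuous.max 4
+                        -out                   RGB_color_image.tif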
+
+Example
+~~~~~~~
+
+We consider 4 classes: water, roads, vegetation and buildings with red
+roofs. Data is available in the OTB-Data
+`repository <http://hg.orfeo-toolbox.org/OTB-Data/file/0fed8f4f035c/Input/Classification>`_ 
+and this image is produced with the commands inside this
+`file <http://hg.orfeo-toolbox.org/OTB-Applications/file/3ce975605013/Testing/Classification/CMakeLists.txt>`_ .
+
+
+|image2| |image3| |image4| 
+
+Figure 2: From left to right: Original image, fusion (in the Monteverdi viewer) of the original image with the fancy classification, and the fancy color classification produced from the labeled image.
+
+Fusion of classification maps
+-----------------------------
+
+After having processed several classifications of the same input image
+but from different models or methods (SVM, KNN, Random Forests, ...),
+it is possible to fuse these classification maps with the
+*FusionOfClassifications* application, which uses either majority
+voting or the Dempster-Shafer framework to handle this fusion. The
+fusion of classifications generates a single, more robust and precise
+classification map which combines the information extracted from the
+input list of labeled images.
+
+The *FusionOfClassifications* application has the following input
+parameters:
+
+-  ``-il`` list of input labeled classification images to fuse
+
+-  ``-out`` the output labeled image resulting from the fusion of the
+   input classification images
+
+-  ``-method`` the fusion method (either by majority voting or by
+   Dempster Shafer)
+
+-  ``-nodatalabel`` label for the no data class (default value = 0)
+
+-  ``-undecidedlabel`` label for the undecided class (default value = 0)
+
+The input pixels with the nodata class label are simply ignored by the
+fusion process. Moreover, the output pixels for which the fusion
+process does not result in a unique class label are set to the
+undecided value.
+
+Majority voting for the fusion of classifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the Majority Voting method implemented in the
+*FusionOfClassifications* application, the value of each output pixel
+is equal to the most frequent class label of the same pixel in the
+input classification maps. However, it may happen that the most
+frequent class label is not unique for some pixels. In that case, the
+undecided label is assigned to those output pixels.
+
+The application can be used like this:
+
+::
+
+    otbcli_FusionOfClassifications  -il             cmap1.tif cmap2.tif cmap3.tif
+                                    -method         majorityvoting
+                                    -nodatalabel    0
+                                    -undecidedlabel 10
+                                    -out            MVFusedClassificationMap.tif
+
+Let us consider 6 independent classification maps of the same input
+image (cf. the left image in `Figure 2`) generated from 6 different SVM
+models. `Figure 3` represents them after color mapping with the same
+LUT. Thus, 4 classes (water: blue, roads: gray, vegetation: green,
+buildings with red roofs: red) are observable on each of them.
+
+|image5| |image6| |image7| 
+
+|image8| |image9| |image10|
+
+Figure 3: Six fancy colored classified images to be fused, generated from 6 different SVM models. 
+
+As an example of the *FusionOfClassifications* application by *majority
+voting*, the fusion of the six input classification maps represented in
+`Figure 3` leads to the classification map illustrated on the right in `Figure 4`.
+Thus, it appears that this fusion highlights the most relevant classes
+among the six different input classifications. The white parts of the
+fused image correspond to the undecided class labels, i.e. to pixels
+for which there is no unique majority vote.
+
+|image11| |image12|
+
+Figure 4: From left to right: Original image, and fancy colored classified image obtained by a majority voting fusion of the 6 classification maps represented in `Figure 3` (water: blue, roads: gray, vegetation: green, buildings with red roofs: red, undecided: white)
+
+Dempster Shafer framework for the fusion of classifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *FusionOfClassifications* application handles another method to
+compute the fusion: the Dempster-Shafer framework. In the
+`Dempster-Shafer
+theory <http://en.wikipedia.org/wiki/Dempster-Shafer_theory>`_ , the
+performance of each classifier producing the classification maps to
+fuse is evaluated with the help of the so-called *belief function* of
+each class label, which measures the degree of belief that the
+corresponding label is correctly assigned to a pixel. For each
+classifier, and for each class label, these belief functions are
+estimated from another parameter called the *mass of belief* of each
+class label, which measures the confidence that the user can have in
+each classifier according to the resulting labels.
+
+In the Dempster Shafer framework for the fusion of classification maps,
+the fused class label for each pixel is the one with the maximal belief
+function. In case of multiple class labels maximizing the belief
+functions, the output fused pixels are set to the undecided value.
+
+In order to estimate the confidence level in each classification map,
+each of them should be confronted with a ground truth. For this purpose,
+the masses of belief of the class labels resulting from a classifier are
+estimated from its confusion matrix, which is itself exported as a
+\*.CSV file with the help of the *ComputeConfusionMatrix* application.
+Thus, using the Dempster Shafer method to fuse classification maps needs
+an additional input list of such \*.CSV files corresponding to their
+respective confusion matrices.
+
+The application can be used like this:
+
+::
+
+    otbcli_FusionOfClassifications  -il             cmap1.tif cmap2.tif cmap3.tif
+                                    -method         dempstershafer
+                                    -method.dempstershafer.cmfl
+                                                    cmat1.csv cmat2.csv cmat3.csv
+                                    -nodatalabel    0
+                                    -undecidedlabel 10
+                                    -out            DSFusedClassificationMap.tif
+
+As an example of the *FusionOfClassifications* application by *Dempster
+Shafer*, the fusion of the six input classification maps represented in
+`Figure 3` leads to the classification map illustrated on the right in
+`Figure 5`. Thus, it appears that this fusion gives access to a more
+precise and robust classification map based on the confidence level in
+each classifier.
+
+|image13| |image14|
+
+Figure 5: From left to right: Original image, and fancy colored classified image obtained by a Dempster-Shafer fusion of the 6 classification maps represented in `Figure 3` (water: blue, roads: gray, vegetation: green, buildings with red roofs: red, undecided: white).
+
+Recommendations to properly use the fusion of classification maps
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to properly use the *FusionOfClassifications* application, some
+points should be considered. First, the ``list_of_input_images`` and
+``OutputFusedClassificationImage`` are single band labeled images, which
+means that the value of each pixel corresponds to the class label it
+belongs to, and labels in each classification map must represent the
+same class. Secondly, the undecided label value must be different from
+existing labels in the input images in order to avoid any ambiguity in
+the interpretation of the ``OutputFusedClassificationImage``.
+
+Majority voting based classification map regularization
+-------------------------------------------------------
+
+Resulting classification maps can be regularized in order to smooth
+irregular classes. Such a regularization process improves the
+classification results by producing more homogeneous areas, which are
+easier to handle.
+
+Majority voting for the classification map regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The *ClassificationMapRegularization* application performs a
+regularization of a labeled input image based on the Majority Voting
+method in a specified ball-shaped neighborhood. For each center pixel,
+Majority Voting takes the most representative value of all the pixels
+identified by the structuring element and then sets the output center
+pixel to this majority label value. The ball-shaped neighborhood is
+identified by its radius expressed in pixels.
+
+Handling ambiguity and not classified pixels in the majority voting based regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Since the Majority Voting regularization may lead to a non-unique
+majority label in the neighborhood, it is important to define the
+behaviour of the filter in this case. For this purpose, a Boolean
+parameter (called ``ip.suvbool``) is used in the
+*ClassificationMapRegularization* application to choose whether pixels
+with more than one majority class are set to Undecided (true) or keep
+their original labels (false = default value).
+
+Moreover, it may happen that pixels in the input image do not belong to
+any of the considered class. Such pixels are assumed to belong to the
+NoData class, the label of which is specified as an input parameter for
+the regularization. Therefore, those NoData input pixels are invariant
+and keep their NoData label in the output regularized image.
+
+The *ClassificationMapRegularization* application has the following
+input parameters:
+
+-  ``-io.in`` labeled input image resulting from a previous
+   classification process
+
+-  ``-io.out`` output labeled image corresponding to the regularization
+   of the input image
+
+-  ``-ip.radius`` integer corresponding to the radius of the ball shaped
+   structuring element (default value = 1 pixel)
+
+-  ``-ip.suvbool`` boolean parameter used to choose whether pixels with
+   more than one majority class are set to Undecided (true), or to their
+   Original labels (false = default value). Please note that the
+   Undecided value must be different from existing labels in the input
+   image
+
+-  ``-ip.nodatalabel`` label for the NoData class. Such input pixels
+   keep their NoData label in the output image (default value = 0)
+
+-  ``-ip.undecidedlabel`` label for the Undecided class (default value =
+   0).
+
+The application can be used like this:
+
+::
+
+    otbcli_ClassificationMapRegularization  -io.in              labeled_image.tif
+                                            -ip.radius          3
+                                            -ip.suvbool         true
+                                            -ip.nodatalabel     10
+                                            -ip.undecidedlabel  7
+                                            -io.out             regularized.tif
+
+Recommendations to properly use the majority voting based regularization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to properly use the *ClassificationMapRegularization*
+application, some points should be considered. First, both
+``InputLabeledImage`` and ``OutputLabeledImage`` are single band labeled
+images, which means that the value of each pixel corresponds to the
+class label it belongs to. The ``InputLabeledImage`` is commonly an
+image generated with a classification algorithm such as the SVM
+classification. Remark: both ``InputLabeledImage`` and
+``OutputLabeledImage`` are not necessarily of the same datatype.
+Secondly, if ip.suvbool == true, the Undecided label value must be
+different from existing labels in the input labeled image in order to
+avoid any ambiguity in the interpretation of the regularized
+``OutputLabeledImage``. Finally, the structuring element radius must
+have a minimum value equal to 1 pixel, which is its default value. Both
+NoData and Undecided labels have a default value equal to 0.
+
+Example
+~~~~~~~
+
+The input of this example results from the *ColorMapping* application
+presented in section :ref:`section4` and illustrated in `Figure 2`.
+`Figure 6` shows a regularization of a classification map composed of 4
+classes: water, roads, vegetation and buildings with red roofs. The
+radius of the ball-shaped structuring element is equal to 3 pixels,
+which corresponds to a ball included in a 7 x 7 pixel square. Pixels
+with more than one majority class keep their original labels.
+
+|image15| |image16| |image17|
+
+Figure 6: From left to right: Original image, fancy colored classified image and regularized classification map with a radius equal to 3 pixels.
+
+.. |image1| image:: ../Art/MonteverdiImages/monteverdi_vectorization_module_for_classification.png
+.. |image2| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+                    :scale: 88%
+
+.. |image3| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_fusion.jpg
+                    :scale: 88%
+
+.. |image4| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif.jpg
+                    :scale: 88%
+
+.. |image5| image:: ../Art/MonteverdiImages/QB_1_ortho_C1_CM.png
+                    :scale: 88%
+
+.. |image6| image:: ../Art/MonteverdiImages/QB_1_ortho_C2_CM.png
+                    :scale: 88%
+
+.. |image7| image:: ../Art/MonteverdiImages/QB_1_ortho_C3_CM.png
+                    :scale: 88%
+
+.. |image8| image:: ../Art/MonteverdiImages/QB_1_ortho_C4_CM.png
+                    :scale: 88%
+
+.. |image9| image:: ../Art/MonteverdiImages/QB_1_ortho_C5_CM.png
+                    :scale: 88%
+
+.. |image10| image:: ../Art/MonteverdiImages/QB_1_ortho_C6_CM.png
+                    :scale: 88%
+
+.. |image11| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image12| image:: ../Art/MonteverdiImages/QB_1_ortho_MV_C123456_CM.png
+.. |image13| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+.. |image14| image:: ../Art/MonteverdiImages/QB_1_ortho_DS_V_P_C123456_CM.png
+
+.. |image15| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
+             :scale: 88%
+
+.. |image16| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_CMR_input.png
+             :scale: 88%
+
+.. |image17| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_CMR_3.png
+             :scale: 88%
diff --git a/Documentation/Cookbook/rst/recipes/pleiades.rst b/Documentation/Cookbook/rst/recipes/pleiades.rst
new file mode 100644
index 0000000000000000000000000000000000000000..529247a87c8dde6247de1c41057fa68bb2de5f24
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/pleiades.rst
@@ -0,0 +1,283 @@
+Using `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images in **OTB Applications** and **Monteverdi**
+==============================================================================================================
+
+The typical `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_
+product is a pansharpened image of 40 000 by 40 000 pixels large, with 4
+spectral bands, but one can even order larger mosaics, whose size can be
+even larger, with hundreds of thousands of pixels in each dimension.
+
+To allow easier storage and transfer of such products, the standard
+image file format is
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_ , which achieves
+high compression rates. The counterpart of this better storage and
+transfer performance is that pixel access within those images may be
+slower than with an uncompressed image format, and, even more
+importantly, the cost of accessing pixels is not uniform: it depends on
+where the pixels you are trying to access are located, and how they are
+spatially arranged.
+
+To be more specific,
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images are
+internally encoded into 2048 by 2048 pixel tiles (within the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file). These tiles
+represent the atomic decompression unit: if you need a single pixel
+from a given tile, you still have to decode the whole tile to get it.
+As a result, if you plan to access a large number of pixels within the
+image, you should try to access them on a per-tile basis, because every
+time a given tile is requested more than once, the performance of your
+processing chain drops.
+
+What does this mean? In **Orfeo Toolbox** , the streaming (on the fly)
+pipeline execution will try to stay synchronised with the input image
+tiling scheme to avoid decoding the same tile several times. But you
+may know that in the **Orfeo Toolbox** world, one can easily chain
+numerous processing steps, some of them enlarging the requested region
+to process the output - like neighbourhood-based operators for instance
+- or even completely changing the image geometry - like
+ortho-rectification for instance. This chaining freedom is also at the
+heart of **Monteverdi** . In short, it is very easy to build a
+processing pipeline in **Orfeo Toolbox** or a chain of modules in
+**Monteverdi** that gets very poor performance, even if the **Orfeo
+Toolbox** back-end does its best to stay in tune with the tiles. And we
+have not even mentioned sub-sampling the whole dataset at some point in
+the pipeline, which leads to even poorer performance, and yet happens
+every time a viewer is called on a module output in **Monteverdi** .
+
+So, can **Monteverdi** or **OTB Applications** open and process
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images?
+Fortunately yes. **Monteverdi** even takes advantage of the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  ability to
+generate coarser scale images for quick-look generation and
+visualisation purposes. But to ease the use of
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images in
+**Monteverdi** , we chose to open them as a separate data type, and to
+lock the use of most modules for this data type. It can only be used
+in the Viewer module and in a dedicated module which uncompresses a
+user-defined part of a
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image to disk. One
+can still force the data type when opening the image, but this is not
+advised: the advised way to use the other modules with
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  data is to first
+uncompress your area of interest to disk, and then open it again in
+**Monteverdi** (careful, you may need a lot of disk space to do this).
+As for the applications, they will work fine even on
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  data, but keep in
+mind that performance may suffer depending on the processing you are
+trying to achieve. Again, the advised way of working is to uncompress
+your area of interest first and then work with the uncompressed file,
+as you would with other data.
+
+A final word about metadata: **OTB Applications** and **Monteverdi** can
+read the Dimap V2 metadata file (note that we also read the earlier,
+non-official Dimap V1.1 format) associated with the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file in the
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  product. It reads
+the RPC localisation model for geo-coding and the information needed to
+perform radiometric calibration. These metadata will be written to an
+associated geometry file (with a *.geom* extension) when uncompressing
+your area of interest to disk, so that both **Monteverdi** and **OTB
+Applications** will be able to retrieve them, even for image extracts.
+
+.. _section1:
+
+
+Opening a `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image in **Monteverdi**
+----------------------------------------------------------------------------------------
+
+Opening a `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image in
+**Monteverdi** is no different from opening any other kind of dataset:
+use the *Open Dataset* item from the *File* menu, and select the JP2
+file corresponding to your image using the file browser.
+
+
+.. figure:: ../Art/MonteverdiImages/pleiades_open.png
+
+   Figure 1: Dialog window when opening a Pleiades image in Monteverdi
+
+.. figure:: ../Art/MonteverdiImages/pleiades_monteverdi.png
+
+   Figure 2: Pleiades images in the main Monteverdi window
+
+
+`Figure 1` shows the dialog box when opening a `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_
+image in **Monteverdi** . One can see some changes with respect to
+the classical dialog box for opening images.
+The first novelty is a combo box for choosing the resolution of
+the `Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file one wants
+to decode. As said in the introduction of this section, **Orfeo
+Toolbox** can take advantage of the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  capability to
+access coarser resolutions very efficiently. If you select for instance
+the *Resolution: 1* item, you will end up with an image half the size
+of the original image, with pixels twice as big. For instance, on a
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  panchromatic or
+pansharpened product, the *Resolution: 0* image has a ground sampling
+distance of 0.5 meters while the *Resolution: 1* image has a ground
+sampling distance of one meter. For a multispectral product, the
+*Resolution: 0* image has a ground sampling distance of 2 meters while
+the *Resolution: 1* image has a ground sampling distance of 4 meters.
+
+The second novelty is a check-box called *Save quicklook for future
+re-use*. This option speeds up the loading of a
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image within
+**Monteverdi** . In fact, when loading a
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image,
+**Monteverdi** generates a quicklook of this image to be used as a
+minimap in the *Viewer Module* as well as in the *Uncompress Jpeg2000
+image* module. This quicklook is the coarsest resolution level of
+the `Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file: it
+should decode easily, but can still take a while. This is why, if the
+check-box is checked, **Monteverdi** will write this quicklook in
+uncompressed *Tiff* format next to the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file. For
+instance, if the file name is:
+
+::
+
+    IMG_PHR1A_MS_201204011017343_SEN_IPU_20120529_1596-002_R1C1.JP2
+
+**Monteverdi** will write, if it can, the following files in the same
+directory:
+
+::
+
+    IMG_PHR1A_MS_201204011017343_SEN_IPU_20120529_1596-002_R1C1.JP2_ql_by_otb.tif
+    IMG_PHR1A_MS_201204011017343_SEN_IPU_20120529_1596-002_R1C1.JP2_ql_by_otb.geom
+
+The next time one tries to open this image in **Monteverdi** , the
+application will find these files and load the quicklook directly from
+them, instead of decoding it from the
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  file, resulting in
+an instant loading of the image in **Monteverdi** . Since the weight of
+these extra files is usually a few megabytes, it is recommended to
+keep this option checked unless one has a very good reason not to. Now
+that the `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image is
+loaded in **Monteverdi** , it appears in the main **Monteverdi** window,
+as shown in `Figure 2`.
+
+Viewing a `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image in **Monteverdi**
+----------------------------------------------------------------------------------------
+
+You can open the `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_
+image in the viewer, either by using the contextual menu or by opening
+the *Viewer Module* through the menu bar.
+
+You can notice that the viewer opens quickly without showing the
+traditional progress bar. This is because **Monteverdi** already loaded
+the quick-look upon opening, and we do not need to re-compute it each
+time the image is opened in the *Viewer Module*.
+
+.. figure::  ../Art/MonteverdiImages/pleiades_viewer.png
+
+   Figure 3: A Pleiades image displayed in the Monteverdi viewer. (c) CNES 2012
+
+`Figure 3` shows a `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image displayed in
+the *Viewer Module*. One can notice that the navigation experience is
+rather smooth. If you navigate using the arrow keys, you will notice
+that latency can occur now and then: this is due to the viewport
+switching to a new `Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  tile to
+decode. One can also observe that the latitude and longitude of the
+pixel under the mouse pointer are displayed, which means that the
+sensor modelling is handled (if you have an internet connection, you
+may even see the actual name of the place under the mouse pointer).
+Last, as said in the foreword of this section,
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images can be quite
+large, so it might be convenient to switch the viewer style from
+*Packed* to *Splitted*, in which case you will be able to maximize the
+*Scroll Window* for better localisation of the viewed area. To do so,
+one can go to the *Setup* tab of the *Viewer Control Window*.
+
+Handling mega-tiles in **Monteverdi**
+--------------------------------------
+
+If the `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  product is
+very large, it might happen that the image is actually split into
+several `Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  files,
+also called mega-tiles. Since the area of interest might span two or
+more mega-tiles, it is convenient to stitch these tiles together so as
+to get the entire scene into one **Monteverdi** dataset. To do so, one
+must first open all the mega-tiles in **Monteverdi** , as described
+in :ref:`section1`.
+
+Once this is done, one can use the *Mosaic Images module* from the
+*File* menu. Simply append all mega-tiles into the module and run it:
+the module will look for the :math:`RiCj` pattern to determine the
+mega-tile layout, and will also check for consistency, e.g. missing
+tiles or mega-tile size mismatches. Upon success, it generates a new
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image dataset
+corresponding to the entire scene, as shown in `Figure 4`. One can
+then use this dataset as a regular
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  dataset.
+
+.. figure::  ../Art/MonteverdiImages/pleiades_mtiles_open.png
+
+Figure 4: Pleiades mega-tiles and output mosaic in Monteverdi
+
+Partial uncompressing of `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images in **Monteverdi**
+--------------------------------------------------------------------------------------------------------
+
+The next very important thing one can do with **Monteverdi** is to
+select an area of interest in the
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image so as to
+uncompress it to disk. To do so, open the
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  dataset into the
+*Uncompress Jpeg2000 image module* from the *File* menu.
+
+.. figure::  ../Art/MonteverdiImages/pleiades_uncom.png
+
+Figure 5: A Pleiades image in Monteverdi Uncompress Jpeg2000 image module. (c) CNES 2012
+
+`Figure 5` shows what this module looks like. On the left, one can find
+information about the image dimensions, resolution level, number of
+`Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_  tiles in the
+image, tile dimensions, and tile size in megabytes. The center part of
+the module is the most important one: it displays a quick-look of the
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  image. On this
+quick-look, one can select the area to be decoded by drawing a rectangle
+with the mouse. The red rectangle shown by the module corresponds to
+this user-defined area. On the left, in red, one can find the start
+index and size of the corresponding region.
+
+The module also displays a green rectangle, which shows the minimum set
+of tiles needed to decode the red area: **this is the region that will
+actually be decoded to disk**. On the left, in green, one can find
+information about this region: how many tiles it contains, and what the
+size of the corresponding decoded output file will be.
+
+Once the area of interest has been chosen, one can click on the *Save*
+button and select an output file. The module will write a geometry file
+(with the *.geom* extension) with all useful metadata in it, so that
+when reading the file back in **Monteverdi** or in **OTB Applications** ,
+geometry and radiometry based functionalities can still be used.
+
+
+Other processing of `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images with **Monteverdi**
+-----------------------------------------------------------------------------------------------------
+
+For all the reasons explained in the foreword of this section, we do
+not allow the direct use of
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images in the
+remaining **Monteverdi** modules: the advised workflow is to first
+uncompress the area of interest to disk.
+
+Processing of `Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images with **OTB Applications**
+-----------------------------------------------------------------------------------------------------
+
+The **OTB Applications** are able to work directly with
+`Pleiades <http://smsc.cnes.fr/PLEIADES/index.htm>`_  images. However,
+keep in mind that performance may be limited due to the reasons
+explained in the foreword of this section. If you experience poor
+performance with some application, try to uncompress the area of
+interest from your image with **Monteverdi** first. One can also use
+the *ExtractROI* application for this purpose, as sketched below.
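+
+A hedged sketch of such an extraction is shown below; the ``-startx``,
+``-starty``, ``-sizex`` and ``-sizey`` parameter names are assumptions
+to be checked against the application help, and the file name and pixel
+values are placeholders.
+
+::
+
+    otbcli_ExtractROI -in     pleiades_image.JP2
+                      -startx 10000
+                      -starty 10000
+                      -sizex  2048
+                      -sizey  2048
+                      -out    extract.tif uint16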
+
+One thing that is interesting to know is that one can access the
+coarser resolutions of the `Jpeg2000 <http://en.wikipedia.org/wiki/JPEG_2000>`_
+file by appending :math:`:i` to the filename, where :math:`i` is the
+resolution level, starting at 0. For instance, one can use the following:
+
+::
+
+    otbcli_ExtractROI -in IMG_PHR1A_PMS_201201151100183_SEN_IPU_20120222_0901-001_R2C1.JP2:5 -out test.tif uint16
diff --git a/Documentation/Cookbook/rst/recipes/residual_registration.rst b/Documentation/Cookbook/rst/recipes/residual_registration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5afb9c48ee5d4cd12715d31103fec660f5814dd3
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/residual_registration.rst
@@ -0,0 +1,232 @@
+Residual registration
+---------------------
+
+Image registration is a fundamental problem in image processing. The
+aim is to align two or more images of the same scene, often taken at
+different times, from different viewpoints, or by different sensors. It
+is a basic step for orthorectification, image stitching, image fusion,
+change detection, etc. This process is also critical for the stereo
+reconstruction process, in order to obtain an accurate estimation of
+the epipolar geometry.
+
+The sensor model alone is generally not sufficient to provide accurate
+image registration. Indeed, optical remote sensing images can contain
+several sources of geometric distortion, including Earth rotation,
+platform movement, non-linearity, etc.
+
+These result in geometric errors at the scene, image and pixel levels.
+It is critical to rectify these errors before a thematic map is
+generated, especially when the remote sensing data need to be
+integrated with other GIS data.
+
+This figure illustrates the generic workflow in the case of image series
+registration::
+
+                             +--------------------------+
+                             |       InputSeries        |
+                             +--------------------------+
+                             |       Sensor Model       |
+                             +--------------------------+
+                             |           DEM            |
+                             +--------------------------+
+                             |  Geo-referenced Series   |
+                             +--------------------------+
+                             |    Homologous Points     |
+                             +--------------------------+
+                             | Bundle-block Adjustment  |
+                             +--------------------------+
+                             |    Fine Registration     |
+                             +--------------------------+
+                             |   Registered Series      |
+                             +--------------------------+
+                             |     Map Projection       |
+                             +--------------------------+ 
+                             |   Cartographic Series    |
+                             +--------------------------+
+
+We will now illustrate this process by applying this workflow to
+register two images. This process can be easily extended to perform
+image series registration.
+
+The aim of this example is to describe how to register a Level 1
+QuickBird image to an orthorectified Pleiades image over the area of
+Toulouse, France.
+
+|image1| |image2| 
+
+Figure 4.10: From left to right: Pleiades ortho-image, and original QuickBird image over Toulouse
+
+Extract metadata from the image reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We first dump the geometry metadata of the image we want to refine into
+a text file. In OTB, we use the extension *.geom* for this type of
+file. As you will see, the application which estimates the refined
+geometry only needs this metadata and a set of homologous points as
+input. The refinement application will create a new *.geom* file
+containing the refined geometry parameters, which can later be used for
+reprojection, for example.
+
+The use of external *.geom* files is available in OTB since release
+3.16. See
+`here <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__ for
+more information.
+
+::
+
+
+    otbcli_ReadImageInfo   -in slave_image
+                           -outkwl TheGeom.geom
+
+Extract homologous points from images
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main idea of the residual registration is to estimate a second
+transformation (applied after the sensor model).
+
+The homologous points application uses an interest point detection
+method to get a set of points which match in both images.
+
+The basic idea is to use this set of homologous points to estimate a
+residual transformation between the two images.
+
+There is a wide variety of keypoint detectors in the literature. They
+detect and describe local features in images. These algorithms provide
+a “feature description” for each interest point. This descriptor has
+the property of being invariant to image translation, scaling and
+rotation, partially invariant to illumination changes, and robust to
+local geometric distortion. Features extracted from the input images
+are then matched against each other. These correspondences are then
+used to create the homologous points.
+
+`SIFT <http://en.wikipedia.org/wiki/Scale-invariant_feature_transform>`__
+or `SURF <http://en.wikipedia.org/wiki/SURF>`__ keypoints can be
+computed in the application. The band on which keypoints are computed
+can be set independently for both images.
+
+The application offers two modes:
+
+-  The first is the full mode, where keypoints are extracted from the
+   full extent of both images (please note that in this mode large
+   image files are not supported).
+
+-  The second mode, called *geobins*, sets up spatial binning so as to
+   get fewer points spread across the entire image. In this mode, the
+   corresponding spatial bin in the second image is estimated using a
+   geographical transform or sensor modelling, and is padded according
+   to the user-defined precision.
+
+Moreover, in both modes the application can filter matches whose
+co-localization error in the first image exceeds this precision. The
+elevation parameters allow dealing more precisely with the sensor
+modelling in the case of sensor geometry data. The *outvector* option
+creates a vector file with segments corresponding to the localization
+error between the matches.
+
+Finally, with the *2wgs84* option, you can match two sensor geometry
+images, or a sensor geometry image with an ortho-rectified reference.
+In all cases, you get a list of ground control points spread all over
+your image.
+
+::
+
+
+
+    otbcli_HomologousPointsExtraction   -in1 slave_image
+                                        -in2 reference_image
+                                        -algorithm surf
+                                        -mode geobins
+                                        -mode.geobins.binstep 512
+                                        -mode.geobins.binsize 512
+                                        -mfilter 1
+                                        -precision 20
+                                        -2wgs84 1
+                                        -out homologous_points.txt
+                                        -outvector points.shp
+                                        -elev.dem dem_path/SRTM4-HGT/
+                                        -elev.geoid OTB-Data/Input/DEM/egm96.grd
+
+Note that for a proper use of the application, elevation must be
+correctly set (including DEM and geoid file).
+
+Geometry refinement using homologous points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We can now use this set of tie points to estimate a residual
+transformation. For this we use the dedicated application called
+**RefineSensorModel**, which makes use of OSSIM capabilities to align
+the sensor model.
+
+It reads the input geometry metadata file (*.geom*) which contains the
+sensor model information that we want to refine, and the text file
+(homologous\_points.txt) containing the list of ground control points.
+It performs a least-squares fit of the adjustable sensor model
+parameters to these tie points and produces an updated geometry file as
+output (the extension is always *.geom*).
+
+The application can also provide an optional statistics file based on
+the ground control points, and a vector file containing the residuals
+that you can display in GIS software.
+
+Please note again that for a proper use of the application, elevation
+must be correctly set (including DEM and geoid file). The map
+parameters allow choosing a map projection in which the accuracy will
+be estimated (in meters).
+
+Accuracy values are provided as output of the application (computed at
+the tie point locations) and also allow controlling the precision of
+the estimated model.
+
+::
+
+
+    otbcli_RefineSensorModel   -elev.dem dem_path/SRTM4-HGT/
+                               -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                               -ingeom slave_image.geom
+                               -outgeom refined_slave_image.geom
+                               -inpoints homologous_points.txt
+                               -outstat stats.txt
+                               -outvector refined_slave_image.shp
+
+Orthorectify image using the refined geometry
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now we will show how to use this new sensor model. In our case, we will
+use it to orthorectify the QuickBird image over the Pléiades reference.
+Since version 3.16, **Orfeo Toolbox** offers the possibility to use
+`extended image paths <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__
+to specify a different metadata file as input. That is what we are
+going to use here: we orthorectify the QuickBird image using the
+*.geom* file obtained with the **RefineSensorModel** application, so
+that the estimated sensor model takes into account the original sensor
+model of the slave image and also fits the set of tie points.
+
+::
+
+
+    otbcli_OrthoRectification   -io.in slave_image?&geom=TheRefinedGeom.geom
+                                -io.out ortho_slave_image
+                                -elev.dem dem_path/SRTM4-HGT/
+                                -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                         
+
+As a result, if you have enough homologous points in the images and you
+have checked that the residual error between the set of tie points and
+the estimated sensor model is small, you should now achieve a good
+registration between the two rectified images, normally far better than
+’only’ performing separate orthorectifications of the two images.
+
+This methodology can be adapted and applied in several cases, for
+example:
+
+-  register stereo pair of images and estimate accurate epipolar
+   geometry
+
+-  registration prior to change detection
+
+.. |image1| image:: ../Art/MonteverdiImages/registration_pleiades_ql.png
+            :scale: 70%
+
+.. |image2| image:: ../Art/MonteverdiImages/registration_quickbird_ql.png
+            :scale: 80%
diff --git a/Documentation/Cookbook/rst/recipes/stereo.rst b/Documentation/Cookbook/rst/recipes/stereo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ecd605dead356f9434b519c57867f536f9799f2a
--- /dev/null
+++ b/Documentation/Cookbook/rst/recipes/stereo.rst
@@ -0,0 +1,511 @@
+Stereoscopic reconstruction from VHR optical images pair
+========================================================
+
+This section describes how to convert a pair of stereo images into
+elevation information.
+
+The standard terrain reconstruction workflow with the available **OTB
+Applications** contains the following steps:
+
+-  Estimation of displacements grids for epipolar geometry
+   transformation
+
+-  Epipolar resampling of the image pair using those grids
+
+-  Dense disparity map estimation
+
+-  Projection of the disparities on a Digital Surface Model (DSM)
+
+Let’s go to the third dimension!
+
+Estimate epipolar geometry transformation
+-----------------------------------------
+
+The aim of this application is to generate resampling grids to
+transform the images into epipolar geometry. `Epipolar geometry <http://en.wikipedia.org/wiki/Epipolar_geometry>`_ is the geometry of stereo
+vision.
+The operation of stereo rectification determines transformations to
+apply to each image such that pairs of conjugate epipolar lines become
+collinear, parallel to one of the image axes and aligned. In this
+geometry, the objects present on a given row of the left image are also
+located on the same row in the right image.
+
+Applying this transformation reduces the problem of elevation (or
+stereo correspondence determination) to a 1-D problem. We have two
+images, image1 and image2, over the same area (the stereo pair), and we
+assume that we know the localization functions (forward and inverse)
+associated with each of these images.
+
+The forward function allows to go from the image referential to the
+geographic referential:
+
+.. math:: (long,lat) = f^{forward}_{image1}(i,j,h)
+
+where h is the elevation hypothesis, :math:`(i,j)` are the pixel
+coordinates in image1 and (long,lat) are geographic coordinates. As you
+can imagine, the inverse function goes from geographic coordinates back
+to the image geometry.
+
+For the second image, the expression of the inverse function is thus:
+
+.. math:: (i_{image2},j_{image2}) = f^{inverse}_{image2}(long,lat,h)
+
+Using jointly the forward and inverse functions from the image pair, we
+can construct a co-localization function
+:math:`f_{image1 \rightarrow image2}` between the position of a pixel in
+the first and its position in the second one:
+
+.. math:: (i_{image2},j_{image2}) = f_{image1 \rightarrow image2} (i_{image1} , j_{image1} , h)
+
+The expression of this function is:
+
+.. math:: f_{image1 \rightarrow image2} (i_{image1} , j_{image1} , h) =  f^{inverse}_{image2}\left( f^{forward}_{image1}(i_{image1} , j_{image1} , h), h\right)
+
+The exact expression is not really important. What we need to
+understand is that, if we are able to determine for a given pixel in
+image1 the corresponding pixel in image2, then, since we know the
+expression of the co-localization function between both images, we can
+deduce by identification the information about the elevation (the
+variable h in the equation)!
+
+We now have the mathematical basis to understand how 3-D information can
+be extracted by examination of the relative positions of objects in the
+two 2-D epipolar images.
+
+The construction of the two epipolar grids is a little bit more
+complicated in the case of VHR optical images. That is because most
+passive remote sensing satellites use a push-broom sensor, which
+corresponds to a line of sensors arranged perpendicularly to the flight
+direction of the spacecraft. This acquisition configuration implies a
+slightly different strategy for stereo-rectification (`see here <http://en.wikipedia.org/wiki/Epipolar_geometry#Epipolar_geometry_of_pushbroom_sensor>`_).
+
+We will now explain how to use the *StereoRectificationGridGenerator*
+application to produce two images which are **deformation grids** to
+resample the two images in epipolar geometry.
+
+::
+
+    otbcli_StereoRectificationGridGenerator -io.inleft image1.tif
+                                            -io.inright image2.tif
+                                            -epi.elevation.avg.value 50
+                                            -epi.step 5
+                                            -io.outleft outimage1_grid.tif
+                                            -io.outright outimage2_grid.tif
+
+The application estimates the displacement to apply to each pixel in
+both input images to obtain epipolar geometry. The application accepts
+a ‘step’ parameter to estimate the displacements on a coarser grid;
+here the displacements are estimated every 5 pixels (the value of the
+step parameter in the command above). This is because in most cases,
+with a pair of VHR images and a small angle between the two
+acquisitions, this grid is very smooth. Moreover, the implementation is
+not *streamable* and can potentially use a lot of memory. Therefore it
+is generally a good idea to estimate the displacement grid at a coarser
+resolution.
+
+The application outputs the size of the output images in epipolar
+geometry. **Note these values**: we will use them in the next step to
+resample the two images in epipolar geometry.
+
+In our case, we have:
+
+::
+
+    Output parameters value:
+    epi.rectsizex: 4462
+    epi.rectsizey: 2951
+    epi.baseline:  0.2094
+
+The epi.baseline parameter provides the mean value (in
+:math:`pixels.meters^{-1}`) of the baseline to sensor altitude ratio. It
+can be used to convert disparities to physical elevation, since a
+disparity of this value will correspond to an elevation offset of one
+meter with respect to the mean elevation.
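+
+In other words, using the value reported above, the elevation offset
+:math:`\Delta h` associated with a horizontal disparity :math:`d` (in
+pixels) is roughly:
+
+.. math:: \Delta h \approx \frac{d}{\text{epi.baseline}} = \frac{d}{0.2094} \approx 4.8\,d \text{ meters}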
+
+We can now move forward to the resampling in epipolar geometry.
+
+Resample images in epipolar geometry
+------------------------------------
+
+The previous application generates two displacement grids. The
+*GridBasedImageResampling* application allows resampling the two input
+images into epipolar geometry using these grids. The grids are
+intermediate results, not really useful on their own in most cases.
+This second step *only* consists in applying the transformation and
+resampling both images. This application can obviously be used in a lot
+of other contexts.
+
+The two commands to generate epipolar images are:
+
+::
+
+    otbcli_GridBasedImageResampling -io.in image1.tif
+                                    -io.out image1_epipolar.tif
+                                    -grid.in outimage1_grid.tif
+                                    -out.sizex 4462
+                                    -out.sizey 2951
+
+::
+
+    otbcli_GridBasedImageResampling -io.in image2.tif
+                                    -io.out image2_epipolar.tif
+                                    -grid.in outimage2_grid.tif
+                                    -out.sizex 4462
+                                    -out.sizey 2951
+
+As you can see, we set *sizex* and *sizey* parameters using output
+values given by the *StereoRectificationGridGenerator* application to
+set the size of the output epipolar images.
+
+.. figure:: ../Art/MonteverdiImages/stereo_image1_epipolar.png
+
+
+.. figure:: ../Art/MonteverdiImages/stereo_image2_epipolar.png
+
+
+Figure 1: Extract of the resampled image1 and image2 in epipolar geometry over the Pyramids of Cheops. ©CNES 2012
+
+
+We obtain two images in epipolar geometry, as shown in
+`Figure 1`. Note that the application allows resampling only a part of
+the image using the *-out.ulx* and *-out.uly* parameters.
+
+Disparity estimation: Block matching along epipolar lines
+---------------------------------------------------------
+
+Finally, we can begin the stereo correspondences lookup process!
+
+Things are becoming a little bit more complex but do not worry. First,
+we will describe the power of the *BlockMatching* application.
+
+The resampling of our images in epipolar geometry allows us to
+constrain the search along a 1-dimensional line as opposed to both
+dimensions. What is even more important is that the disparities along
+the lines, i.e. the offsets along the lines measured by the
+block-matching process, can be directly linked to the local elevation.
+
+An almost complete spectrum of stereo correspondence algorithms has
+been published and it is still growing at a significant rate! See for
+example `this overview of block-matching algorithms <http://en.wikipedia.org/wiki/Block-matching_algorithm>`_.
+The **Orfeo Toolbox** implements different strategies for block
+matching:
+
+-  Sum of Squared Distances block-matching (SSD)
+
+-  Normalized Cross-Correlation (NCC)
+
+-  Lp pseudo-norm (LP)
+
+Another important parameter (mandatory in the application!) is the
+range of disparities. In theory, the block matching could perform a
+blind exploration and search over an infinite range of disparities
+between the stereo pair. We now need to evaluate a range of disparities
+where the block matching will be performed (in the general case, from
+the deepest point on Earth, `the Challenger Deep <http://en.wikipedia.org/wiki/Challenger_Deep>`_, to the Everest
+summit!).
+
+We deliberately exaggerate, but you can imagine that without a smaller
+range the block matching algorithm can take a lot of time. That is why
+these parameters are mandatory for the application and, as a
+consequence, we need to estimate them manually. This is pretty simple
+using the two epipolar images.
+
+In our case, we take one point on a *flat* area. The image coordinate in
+:math:`image_{1}` is :math:`[1970,1525]` and in :math:`image_{2}` is
+:math:`[1970,1526]`. We then select a second point on a higher region
+(in our case a point near the top of the Pyramid of Cheops!). The image
+coordinate of this pixel in :math:`image_{1}` is :math:`[1661,1299]` and
+in :math:`image_{2}` is :math:`[1633,1300]`. So, for the horizontal
+exploration, we must set the minimum value lower than :math:`-30` (the
+convention for the sign of the disparity range is from image1 to
+image2).
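+
+To make the arithmetic explicit (disparities being counted from image1
+to image2): on the flat area the horizontal offset is
+:math:`1970 - 1970 = 0` pixels, whereas near the top of the pyramid it
+is :math:`1633 - 1661 = -28` pixels, hence a minimum horizontal
+disparity set below :math:`-30` to keep some margin.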
+
+Note that this estimation can be simplified using an external DEM in the
+*StereoRectificationGridGenerator* application. Regarding the vertical
+disparity, in the first step we said that we reduced the problem of 3-D
+extraction to a 1-D problem, but this is not completely true in general
+cases. There might be small disparities in the vertical direction which
+are due to parallax errors (i.e. epipolar lines exhibit a small shift in
+the vertical direction, around 1 pixel). In fact, the exploration is
+typically smaller along the vertical direction of disparities than along
+the horizontal one. You can also estimate them on the epipolar pair (in
+our case we use a range of :math:`-1` to :math:`1`).
+
+Once again, take care with the sign of the minimum and maximum
+disparities (always counted from image1 to image2).
+
+The command line for the *BlockMatching* application is:
+
+::
+
+    otbcli_BlockMatching -io.inleft image1_epipolar.tif
+                         -io.inright image2_epipolar.tif
+                         -io.out disparity_map_ncc.tif
+                         -bm.minhd -45
+                         -bm.maxhd 5
+                         -bm.minvd -1
+                         -bm.maxvd 1
+                         -mask.inleft image1_epipolar_mask.tif
+                         -mask.inright image2_epipolar_mask.tif
+                         -io.outmetric 1
+                         -bm.metric ncc
+                         -bm.subpixel dichotomy
+                         -bm.medianfilter.radius 5
+                         -bm.medianfilter.incoherence 2.0
+
+By default, the application creates a two-band image: the horizontal
+and vertical disparities.
+
+The *BlockMatching* application gives access to a lot of other powerful
+functionalities to improve the quality of the output disparity map.
+
+Here are a few of these functionalities:
+
+-  -io.outmetric: if the optimal metric values image is activated, it
+   will be concatenated to the output image (which will then have three
+   bands: horizontal disparity, vertical disparity and metric value)
+
+-  -bm.subpixel: Perform sub-pixel estimation of disparities
+
+-  -mask.inleft and -mask.inright: you can specify a no-data value
+   which will discard pixels with this value (for example, the epipolar
+   geometry can generate large parts of the images with black pixels).
+   This mask can be easily generated using the *BandMath* application:
+
+   ::
+
+       otbcli_BandMath -il image1_epipolar.tif
+                       -out image1_epipolar_mask.tif
+                       -exp "if(im1b1<=0,0,255)"
+
+   ::
+
+       otbcli_BandMath -il image2_epipolar.tif
+                       -out image2_epipolar_mask.tif
+                       -exp "if(im1b1<=0,0,255)"
+
+-  -mask.variancet : The block matching algorithm has difficulty
+   finding matches on uniform areas. We can use the variance threshold
+   to discard those regions and speed up computation.
+
+-  -bm.medianfilter.radius 5 and -bm.medianfilter.incoherence 2.0:
+   Applies a median filter to the disparity map. The median filter
+   belongs to the family of nonlinear filters. It is used to smooth an
+   image without being biased by outliers or shot noise. The radius
+   corresponds to the neighbourhood where the median value is computed.
+   A detection of incoherence between the input disparity map and the
+   median-filtered one is performed (a pixel corresponds to an
+   incoherence if the absolute value of the difference between the pixel
+   value in the disparity map and in the median image is higher than the
+   incoherence threshold, whose default value is 1). Both parameters
+   must be defined in the application to activate the filter.
+
+Of course all these parameters can be combined to improve the disparity
+map.
+
+.. figure:: ../Art/MonteverdiImages/stereo_disparity_horizontal.png
+
+
+.. figure:: ../Art/MonteverdiImages/stereo_disparity_metric.png
+
+Figure 2: Horizontal disparity and optimal metric map
+
+
+
+From disparity to Digital Surface Model
+---------------------------------------
+
+Using the previous application, we estimated disparities between the
+images. The next (and last!) step is to transform the disparity map
+into elevation information and produce an elevation map. The
+application uses as input the disparity maps (horizontal and vertical)
+to produce a Digital Surface Model (DSM) with a regular sampling. The
+elevation values are computed from the triangulation of the
+“left-right” pairs of matched pixels. When several elevations are
+available in a DSM cell, the highest one is kept.
+
+First, an important point is that it is often a good idea to rework the
+disparity map given by the *BlockMatching* application to only keep
+relevant disparities. For this purpose, we can use the output optimal
+metric image and filter disparities with respect to this value.
+
+For example, if we used Normalized Cross-Correlation (NCC), we can keep
+only disparities where the optimal metric value is greater than
+:math:`0.9`. Disparities below this value can be considered inaccurate
+and will not be used to compute elevation information (the *-io.mask*
+parameter can be used for this purpose).
+
+This filtering can be easily done with **OTB Applications**.
+
+We first use the *BandMath* application to filter disparities according
+to their optimal metric value:
+
+::
+
+    otbcli_BandMath -il disparity_map_ncc.tif
+                    -out thres_hdisparity.tif uint8
+                    -exp "if(im1b3>0.9,255,0)"
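+
+The thres_vdisparity.tif mask used in the next command can be produced
+in the same way; in this sketch it relies on the same optimal metric
+band, so it is simply the same mask written under another name:
+
+::
+
+    otbcli_BandMath -il disparity_map_ncc.tif
+                    -out thres_vdisparity.tif uint8
+                    -exp "if(im1b3>0.9,255,0)"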
+
+Then, we concatenate the thresholded disparity masks using the
+*ConcatenateImages* application:
+
+::
+
+    otbcli_ConcatenateImages -il thres_hdisparity.tif thres_vdisparity.tif
+                             -out thres_hvdisparity.tif
+
+Now, we can use the *DisparityMapToElevationMap* application to compute
+the elevation map from the filtered disparity maps.
+
+::
+
+    otbcli_DisparityMapToElevationMap -io.in disparity_map_ncc.tif
+                                      -io.left image1.tif
+                                      -io.right image2.tif
+                                      -io.lgrid outimage1_grid.tif
+                                      -io.rgrid outimage2_grid.tif
+                                      -io.mask thres_hdisparity.tif
+                                      -io.out disparity_map_ssd_to_elevation.tif
+                                      -hmin 10
+                                      -hmax 400
+                                      -elev.default 50
+
+It produces the elevation map projected in WGS84 (EPSG code 4326) over
+the ground area covered by the stereo pair. Pixel values are expressed
+in meters.
+
+.. figure:: ../Art/MonteverdiImages/stereo_dem_zoom.png
+
+Figure 3: Extract of the elevation map over the Pyramids of Cheops.
+
+And that is it: `Figure 3` shows the output elevation map obtained from
+the Cheops pair.
+
+One application to rule them all in multi stereo framework scheme
+-----------------------------------------------------------------
+
+An application has been added to fuse one or multiple stereo
+reconstructions using an all-in-one approach: *StereoFramework*. It
+computes the DSM from one or several stereo pairs. First of all, the
+user has to choose the input data and define the stereo couples using
+the *-input.co* string parameter. This parameter uses the following
+formatting convention: “:math:`index_{0}` :math:`index_{1}`,
+:math:`index_{2}` :math:`index_{3}`, …”, which will create a first
+couple with images :math:`index_{0}` and :math:`index_{1}`, a second
+with images :math:`index_{2}` and :math:`index_{3}`, and so on. If left
+blank, images are processed by pairs (which is equivalent to using
+“0 1, 2 3, 4 5” …). In addition to the usual elevation and projection
+parameters, the main parameters have been split into the groups
+detailed below (a sketch of a full command line is given after these
+groups):
+
+Output:
+    output parameters (DSM resolution, NoData value, cell fusion
+    method):
+
+    -  output projection map selection
+
+    -  Spatial Sampling Distance of the output DSM in meters
+
+    -  DSM empty cells are filled with this float value (-32768 by
+       default)
+
+    -  choice of fusion strategy in each DSM cell (max, min, mean, acc)
+
+    -  output DSM
+
+    -  output DSM extent choice
+
+Stereorect:
+    Direct and inverse stereorectification grid subsampling parameters
+
+    -  step of the direct deformation grid (in pixels)
+
+    -  sub-sampling of the inverse epipolar grid
+
+BM:
+    Block matching parameters.
+
+    -  block-matching metric choice (robust SSD, SSD, NCC, Lp norm)
+
+    -  radius of blocks for the matching filter (in pixels, :math:`2`
+       by default)
+
+    -  minimum altitude below the selected elevation source (in meters,
+       -20.0 by default)
+
+    -  maximum altitude above the selected elevation source (in meters,
+       20.0 by default)
+
+Postproc:
+    Post-processing parameters
+
+    -  use bijection consistency: right-to-left correlation is computed
+       to validate left-to-right disparities; if no bijection is found,
+       the pixel is rejected
+
+    -  use median disparity filtering (disabled by default)
+
+    -  use the block matching metric output to discard pixels with a
+       low correlation value (disabled by default, float value)
+
+Mask:
+    Compute optional intermediate masks.
+
+    -  mask for the left input image (must have the same size for all
+       couples)
+
+    -  mask for the right input image (must have the same size for all
+       couples)
+
+    -  this parameter allows discarding pixels whose local variance is
+       too small; the size of the neighborhood is given by the radius
+       parameter (disabled by default)
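+
+As announced above, here is a sketch of a full *StereoFramework*
+command line. Apart from *-input.co*, which is described above, the
+parameter names used here (in particular the image list, output
+resolution and output DSM parameters) are assumptions to be checked
+against the application help:
+
+::
+
+    # This is only a sketch: apart from -input.co, parameter names are
+    # assumptions; check otbcli_StereoFramework -help for the exact names.
+    otbcli_StereoFramework -input.il image1.tif image2.tif image3.tif image4.tif
+                           -input.co "0 1,2 3"
+                           -elev.dem dem_path/SRTM4-HGT/
+                           -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                           -output.res 1.0
+                           -output.out dsm.tif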
+
+Stereo reconstruction good practices
+------------------------------------
+
+The minimum and maximum altitude parameters (see the BM group above)
+are used inside the application to derive the minimum and maximum
+horizontal disparity exploration, so they have a critical impact on
+computation time. It is advised to choose an elevation source that is
+not too far from the DSM you want to produce (for instance, an SRTM
+elevation model). The altitude from your elevation source will then
+already be taken into account in the epipolar geometry, and the
+disparities will only reveal the elevation offsets (such as buildings).
+This allows you to use a smaller exploration range along the elevation
+axis, causing a smaller exploration along horizontal disparities and
+faster computation.
+
+Other parameters, such as the stereorectification grid steps, also have
+a deep impact on computation time, so they have to be chosen carefully
+when processing large images.
+
+To reduce the computation time it is useful to crop all sensor images
+to the same extent. The easiest way to do that is to choose an image as
+reference, and then apply the *ExtractROI* application to the other
+sensor images using the fit mode option, as sketched below.
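+
+A sketch of this cropping step is given below; the exact name of the
+fit-mode reference sub-parameter is an assumption here, so check the
+otbcli_ExtractROI help for your OTB version:
+
+::
+
+    # Sketch only: mode.fit.ref is an assumed sub-parameter name,
+    # verify it with otbcli_ExtractROI -help
+    otbcli_ExtractROI -in image2.tif
+                      -mode fit
+                      -mode.fit.ref image1.tif
+                      -out image2_cropped.tif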
+
+Algorithm outline
+-----------------
+
+The application performs the following steps for each sensor pair:
+
+-  Compute the epipolar deformation grids from the stereo pair (direct
+   and inverse)
+
+-  Resample into epipolar geometry with BCO interpolator
+
+-  Create masks for each epipolar image : remove black borders and
+   resample input masks
+
+-  Compute horizontal disparities with a block matching algorithm
+
+-  Refine disparities to sub-pixel precision with a dichotomy algorithm
+
+-  Apply an optional Median filter
+
+-  Filter disparities based on the correlation score (optional) and
+   exploration bounds
+
+-  Translate disparities back into sensor geometry
+
+-  Convert disparity map to 3D map
+
+Then all 3D maps are fused to produce the DSM in the desired geographic
+or cartographic projection, with a parametrizable extent.
diff --git a/Documentation/Cookbook/rst/requirements.txt b/Documentation/Cookbook/rst/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21894cd8b49377abdb0ec6cc760a4a183186bf4a
--- /dev/null
+++ b/Documentation/Cookbook/rst/requirements.txt
@@ -0,0 +1 @@
+sphinxcontrib-inlinesyntaxhighlight
diff --git a/Documentation/Cookbook/rst/residual_registration.rst b/Documentation/Cookbook/rst/residual_registration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e07652163eaa8d0db6bc8e0c25c0d1d700effc2b
--- /dev/null
+++ b/Documentation/Cookbook/rst/residual_registration.rst
@@ -0,0 +1,233 @@
+Residual registration
+---------------------
+
+Image registration is a fundamental problem in image processing. The
+aim is to align two or more images of the same scene, often taken at
+different times, from different viewpoints, or by different sensors. It
+is a basic step for orthorectification, image stitching, image fusion,
+change detection… But this process is also critical for the stereo
+reconstruction process, in order to obtain an accurate estimation of
+the epipolar geometry.
+
+The sensor model is generally not sufficient to provide accurate image
+registration. Indeed, several sources of geometric distortion can be
+contained in optical remote sensing images, including Earth rotation,
+platform movement, non-linearity…
+
+They result in geometric errors at scene level, image level and pixel
+level. It is critical to rectify these errors before a thematic map is
+generated, especially when the remote sensing data needs to be
+integrated together with other GIS data.
+
+This figure illustrates the generic workflow in the case of image series
+registration:
+
+(Workflow diagram: the input series goes through the sensor model,
+supported by a DEM, to give a geo-referenced series; homologous points
+extracted from this series feed a bundle-block adjustment of the sensor
+models; a fine registration step then produces the registered series,
+which is finally map projected into the cartographic series.)
+
+We will now illustrate this process by applying this workflow to
+register two images. This process can be easily extended to perform
+image series registration.
+
+The aim of this example is to describe how to register a Level 1
+QuickBird image over an orthorectified Pléiades image over the area of
+Toulouse, France.
+
+|image1| |image2| [fig:InputImagesRegistration]
+
+Extract metadata from the image reference
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We first dump the geometry metadata of the image we want to refine into
+a text file. In OTB, we use the extension *.geom* for this type of
+file. As you will see, the application that estimates the refined
+geometry only needs this metadata and a set of homologous points as
+input. The refinement application will create a new *.geom* file
+containing the refined geometry parameters, which can then be used, for
+example, for reprojection.
+
+The use of an external *.geom* file has been available in OTB since
+release 3.16. See
+`here <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__ for
+more information.
+
+::
+
+
+    otbcli_ReadImageInfo   -in slave_image
+                           -outkwl TheGeom.geom
+
+Extract homologous points from images
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The main idea of the residual registration is to estimate a second
+transformation, applied after the sensor model.
+
+The homologous points extraction application uses interest point
+detection methods to get a set of points which match in both images.
+
+The basic idea is to use this set of homologous points to estimate a
+residual transformation between the two images.
+
+There is a wide variety of keypoint detectors in the literature. They
+detect and describe local features in images. These algorithms provide,
+for each interest point, a “feature description”. This descriptor is
+invariant to image translation, scaling and rotation, partially
+invariant to illumination changes, and robust to local geometric
+distortion. Features extracted from the input images are then matched
+against each other, and these correspondences are used to create the
+homologous points.
+
+`SIFT <http://en.wikipedia.org/wiki/Scale-invariant_feature_transform>`__
+or `SURF <http://en.wikipedia.org/wiki/SURF>`__ keypoints can be
+computed in the application. The band on which keypoints are computed
+can be set independently for both images.
+
+The application offers two modes:
+
+-  The first is the full mode, where keypoints are extracted from the
+   full extent of both images (please note that in this mode large
+   image files are not supported).
+
+-  The second mode, called *geobins*, allows setting up a spatial
+   binning so as to get fewer points spread across the entire image. In
+   this mode, the corresponding spatial bin in the second image is
+   estimated using the geographical transform or sensor modelling, and
+   is padded according to the user-defined precision.
+
+Moreover, in both modes the application can filter matches whose
+co-localization error in the first image exceeds this precision. Last,
+the elevation parameters allow dealing more precisely with sensor
+modelling in the case of sensor geometry data. The *outvector* option
+creates a vector file with segments corresponding to the localization
+error between the matches.
+
+Finally, with the *2wgs84* option, you can match two sensor geometry
+images or a sensor geometry image with an ortho-rectified reference. In
+all cases, you get a list of ground control points spread all over your
+image.
+
+::
+
+
+
+    otbcli_HomologousPointsExtraction   -in1 slave_image
+                                        -in2 reference_image
+                                        -algorithm surf
+                                        -mode geobins
+                                        -mode.geobins.binstep 512
+                                        -mode.geobins.binsize 512
+                                        -mfilter 1
+                                        -precision 20
+                                        -2wgs84 1
+                                        -out homologous_points.txt
+                                        -outvector points.shp
+                                        -elev.dem dem_path/SRTM4-HGT/
+                                        -elev.geoid OTB-Data/Input/DEM/egm96.grd
+
+Note that for a proper use of the application, elevation must be
+correctly set (including DEM and geoid file).
+
+Geometry refinement using homologous points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We can now use this set of tie points to estimate a residual
+transformation. For this we use the dedicated application called
+**RefineSensorModel**, which makes use of OSSIM capabilities to align
+the sensor model.
+
+It reads the input geometry metadata file (*.geom*) which contains the
+sensor model information that we want to refine, and the text file
+(homologous\_points.txt) containing the list of ground control points.
+It performs a least-squares fit of the adjustable sensor model
+parameters to these tie points and produces an updated geometry file as
+output (the extension is always *.geom*).
+
+The application can also provide an optional statistics file based on
+the ground control points, and a vector file containing the residuals
+that you can display in GIS software.
+
+Please note again that for a proper use of the application, elevation
+must be correctly set (including DEM and geoid file). The map
+parameters allow choosing a map projection in which the accuracy will
+be estimated (in meters).
+
+Accuracy values are provided as output of the application (computed at
+the tie point locations) and also allow controlling the precision of
+the estimated model.
+
+::
+
+
+    otbcli_RefineSensorModel   -elev.dem dem_path/SRTM4-HGT/
+                               -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                               -ingeom slave_image.geom
+                               -outgeom refined_slave_image.geom
+                               -inpoints homologous_points.txt
+                               -outstat stats.txt
+                               -outvector refined_slave_image.shp
+
+Orthorectify image using the refined geometry
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now we will show how to use this new sensor model. In our case, we will
+use it to orthorectify the QuickBird image over the Pléiades reference.
+Since version 3.16, **Orfeo Toolbox** offers the possibility to use
+`extended image paths <http://wiki.orfeo-toolbox.org/index.php/ExtendedFileName>`__
+to specify a different metadata file as input. That is what we are
+going to use here: we orthorectify the QuickBird image using the
+*.geom* file obtained with the **RefineSensorModel** application, so
+that the estimated sensor model takes into account the original sensor
+model of the slave image and also fits the set of tie points.
+
+::
+
+
+    otbcli_OrthoRectification   -io.in slave_image?&geom=TheRefinedGeom.geom
+                                -io.out ortho_slave_image
+                                -elev.dem dem_path/SRTM4-HGT/
+                                -elev.geoid OTB-Data/Input/DEM/egm96.grd
+                         
+
+As a result, if you have enough homologous points in the images and you
+have checked that the residual error between the set of tie points and
+the estimated sensor model is small, you should now achieve a good
+registration between the two rectified images, normally far better than
+’only’ performing separate orthorectifications of the two images.
+
+This methodology can be adapted and applied in several cases, for
+example:
+
+-  register stereo pair of images and estimate accurate epipolar
+   geometry
+
+-  registration prior to change detection
+
+.. |image1| image:: ../Art/MonteverdiImages/registration_pleiades_ql.png
+.. |image2| image:: ../Art/MonteverdiImages/registration_quickbird_ql.png
diff --git a/Documentation/Cookbook/rst/sarprocessing.rst b/Documentation/Cookbook/rst/sarprocessing.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b3380017a51a3fd2d48d04c16b16bea0600e3843
--- /dev/null
+++ b/Documentation/Cookbook/rst/sarprocessing.rst
@@ -0,0 +1,881 @@
+SAR processing
+==============
+
+This section describes how to use the applications related to SAR
+processing.
+
+Calibration
+-----------
+
+The application SarRadiometricCalibration can deal with the calibration
+of data from four radar sensors: RadarSat2, Sentinel1, COSMO-SkyMed and
+TerraSAR-X.
+
+Examples:
+
+If SARimg.tif is a TerraSAR-X or a COSMO-SkyMed image:
+
+::
+
+    otbcli_SarRadiometricCalibration -in SARimg.tif 
+                                     -out SARimg-calibrated.tif 
+
+If SARimg.tif is a RadarSat2 or a Sentinel1 image, it is possible to
+specify the look-up table (automatically found in the metadata provided
+with such images):
+
+::
+
+    otbcli_SarRadiometricCalibration -in SARimg.tif 
+                                     -lut gamma
+                                     -out SARimg-calibrated.tif
+
+For TerraSAR-X (and soon for RadarSat2 and Sentinel1), it is also
+possible to use a noise LUT to derive calibrated noise profiles :
+
+::
+
+    otbcli_SarRadiometricCalibration -in SARimg.tif 
+                                     -lut gamma -noise 1
+                                     -out SARimg-calibrated.tif 
+
+Despeckle
+---------
+
+SAR images are generally corrupted by speckle noise. To suppress
+speckle and improve radar image analysis, many filtering techniques
+have been proposed. The module implements several well-known despeckle
+methods: Frost, Lee, Gamma-MAP and Kuan.
+
+Figure [fig:S1VVdespeckledextract] shows an extract of an SLC Sentinel1
+image, band VV, taken over Cape Verde, and the result of the Gamma-MAP
+filter. The following commands were used to produce the despeckled
+extract:
+
+First, the original image is converted into an intensity one (real part
+corresponds to band 1, and imaginary part to band 2):
+
+::
+
+    otbcli_BandMath -il S1-VV-extract.tif 
+                    -exp im1b1^2+im1b2^2 
+                    -out S1-VV-extract-int.tif 
+
+Then the intensity image is despeckled with the Gamma-MAP filter :
+
+::
+
+    otbcli_Despeckle -in S1-VV-extract-int.tif 
+                     -filter.gammamap.rad 5
+                     -filter.gammamap.nblooks 1 
+                     -out S1-VV-despeckled-extract.tif 
+
+The produced images were then rescaled to intensities ranging from 0 to
+255 in order to be displayed.
+
+|image| |image| [fig:S1VVdespeckledextract]
+
+Polarimetry
+-----------
+
+In conventional imaging radar the measurement is a scalar which is
+proportional to the received back-scattered power at a particular
+combination of linear polarizations (HH, HV, VH or VV). Polarimetry is
+the measurement and interpretation of the polarization of this
+measurement, which allows measuring various optical properties of a
+material. In polarimetry the basic measurement is a :math:`2\times2`
+complex scattering matrix (the Sinclair matrix), yielding an
+eight-dimensional measurement space. For reciprocal targets where
+:math:`HV=VH`, this space is compressed to five dimensions: three
+amplitudes (:math:`|HH|`, :math:`|HV|`, and :math:`|VV|`) and two phase
+measurements (co-pol: HH-VV, and cross-pol: HH-HV). (See
+`grss-ieee <http://www.grss-ieee.org/technical-briefs/imaging-radar-polarimetry>`__.)
+
+Matrix conversions
+~~~~~~~~~~~~~~~~~~
+
+This application allows converting classical polarimetric matrices to
+each other. For instance, it is possible to get the coherency matrix
+from the Sinclair one, or the Mueller matrix from the coherency one.
+The figure below ([fig:polconv]) shows the workflow used in this
+application.
+
+|image| [fig:polconv]
+
+The filters used in this application never handle matrices directly,
+but images where each band corresponds to one of their elements. As SAR
+polarimetry most of the time handles symmetric matrices, only the
+relevant elements are stored, so that the images representing them have
+a minimal number of bands. For instance, the coherency matrix size is
+3x3 in the monostatic case, and 4x4 in the bistatic case: it will thus
+be stored in a 6-band or a 10-band complex image (the diagonal and the
+upper elements of the matrix).
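+
+(Indeed, storing only the diagonal and upper elements of an
+:math:`n \times n` matrix requires :math:`n(n+1)/2` bands:
+:math:`3 \times 4 / 2 = 6` in the monostatic case and
+:math:`4 \times 5 / 2 = 10` in the bistatic case.)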
+
+The Sinclair matrix is a special case: it is always represented as 3 or
+4 one-band complex images (for the monostatic or bistatic case,
+respectively).
+
+There are 13 available conversions, each one selected through one of
+the following parameter values:
+
+#. msinclairtocoherency
+
+#. msinclairtocovariance
+
+#. msinclairtocircovariance
+
+#. mcoherencytomueller
+
+#. mcovariancetocoherencydegree
+
+#. mcovariancetocoherency
+
+#. mlinearcovariancetocircularcovariance
+
+#. muellertomcovariance
+
+#. bsinclairtocoherency
+
+#. bsinclairtocovariance
+
+#. bsinclairtocircovariance
+
+#. sinclairtomueller
+
+#. muellertopoldegandpower
+
+For each conversion parameter, the list below gives the formulas used.
+
+— Monostatic case —
+
+#. msinclairtocoherency (SinclairToReciprocalCoherencyMatrixFunctor)
+
+   #. :math:` 0.5 . (S_{hh}+S_{vv}).(S_{hh}+S_{vv})^{*} `
+
+   #. :math:` 0.5 . (S_{hh}+S_{vv}).(S_{hh}-S_{vv})^{*} `
+
+   #. :math:` 0.5 . (S_{hh}+S_{vv}).(2 S_{hv})^{*} `
+
+   #. :math:` 0.5 . (S_{hh}-S_{vv}).(S_{hh}-S_{vv})^{*} `
+
+   #. :math:` 0.5 . (S_{hh}-S_{vv}).(2 S_{hv})^{*} `
+
+   #. :math:` 0.5 . (2 S_{hv}).(2 S_{hv})^{*} `
+
+#. msinclairtocovariance (SinclairToReciprocalCovarianceMatrixFunctor)
+
+   #. :math:` S_{hh}.S_{hh}^{*} `
+
+   #. :math:` \sqrt{2}.S_{hh}.S_{hv}^{*} `
+
+   #. :math:` S_{hh}.S_{vv}^{*} `
+
+   #. :math:` 2.S_{hv}.S_{hv}^{*} `
+
+   #. :math:` \sqrt{2}.S_{hv}.S_{vv}^{*} `
+
+   #. :math:` S_{vv}.S_{vv}^{*} `
+
+#. msinclairtocircovariance
+   (SinclairToReciprocalCircularCovarianceMatrixFunctor)
+
+   #. :math:` S_{ll}.S_{ll}^{*} `
+
+   #. :math:` S_{ll}.S_{lr}^{*} `
+
+   #. :math:` S_{ll}.S_{rr}^{*} `
+
+   #. :math:` S_{lr}.S_{lr}^{*} `
+
+   #. :math:` S_{lr}.S_{rr}^{*} `
+
+   #. :math:` S_{rr}.S_{rr}^{*} `
+
+   With:
+
+   -  :math:` S_{ll} = 0.5(S_{hh}+2j S_{hv}-S_{vv}) `
+
+   -  :math:` S_{lr} = 0.5(j S_{hh}+j S_{vv}) `
+
+   -  :math:` S_{rr} = 0.5(-S_{hh}+2j S_{hv}+S_{vv}) `
+
+#. mcoherencytomueller (ReciprocalCoherencyToReciprocalMuellerFunctor)
+
+   #. :math:` 0.5*( C_{11}+C_{22}+C_{33} ) `
+
+   #. :math:` Re(C_{12}) + Im(C_{22}) `
+
+   #. :math:` Re(C_{13}) `
+
+   #. :math:` Im(C_{23}) `
+
+   #. :math:` Re(C_{12}) `
+
+   #. :math:` 0.5*( C_{11}+C_{22}-C_{33} ) `
+
+   #. :math:` Re(C_{23}) `
+
+   #. :math:` Im(C_{13}) `
+
+   #. :math:` -Re(C_{13}) `
+
+   #. :math:` -Re(C_{23}) `
+
+   #. :math:` 0.5.Re(VAL1) `
+
+   #. :math:` 0.5.Im(VAL0) `
+
+   #. :math:` Im(C_{23}) `
+
+   #. :math:` Im(C_{13}) `
+
+   #. :math:` 0.5.Im(VAL1^{*}) `
+
+   #. :math:` 0.5.Re(VAL0) `
+
+   With:
+
+   -  :math:` VAL0 = C_{33}+C_{12}-C_{11}-(C_{12}-C_{22})^{*}  `
+
+   -  :math:` VAL1 = -C_{33}+C_{12}-C_{11}-(C_{12}-C_{22})^{*} `
+
+   Where :math:`C_{ij}` are related to the elements of the reciprocal
+   coherence matrix.
+
+#. mcovariancetocoherencydegree
+   (ReciprocalCovarianceToCoherencyDegreeFunctor)
+
+   #. :math:` abs(S_{hh}.S_{vv}^{*}) / sqrt(S_{hh}.S_{hh}^{*}) / sqrt(S_{vv}.S_{vv}^{*}) `
+
+   #. :math:` abs(S_{hv}.S_{vv}^{*}) / sqrt(S_{hv}.S_{hv}^{*}) / sqrt(S_{vv}.S_{vv}^{*}) `
+
+   #. :math:` abs(S_{hh}.S_{hv}^{*}) / sqrt(S_{hh}.S_{hh}^{*}) / sqrt(S_{hv}.S_{hv}^{*}) `
+
+#. mcovariancetocoherency
+   (ReciprocalCovarianceToReciprocalCoherencyFunctor)
+
+   #. :math:` 0.5 . ( C_{33} + C_{13} + C_{13}^{*} + C_{11} ) `
+
+   #. :math:` 0.5 . ( -C_{33} - C_{13} + C_{13}^{*} + C_{11} ) `
+
+   #. :math:` 0.5 . ( \sqrt{2}.C_{12} + \sqrt{2}.C_{23}^{*} ) `
+
+   #. :math:` 0.5 . ( C_{33} - C_{13} - C_{13}^{*} + C_{11} ) `
+
+   #. :math:` 0.5 . ( \sqrt{2}.C_{12} - \sqrt{2}.C_{23}^{*} ) `
+
+   #. :math:` 0.5 . ( 2 . C_{22} ) `
+
+   Where :math:`C_{ij}` are related to the elements of the reciprocal
+   linear covariance matrix.
+
+#. mlinearcovariancetocircularcovariance
+   (ReciprocalLinearCovarianceToReciprocalCircularCovarianceFunctor)
+
+   #. :math:` 0.25 . ( C_{33}-i.\sqrt{2}.C_{23}-C_{13}+i.\sqrt{2}.C_{23}^{*}-C_{13}^{*}+2.C_{22}-i.\sqrt{2}.C_{12}+i.\sqrt{2}.C_{12}^{*}+C_{11} ) `
+
+   #. :math:` 0.25 . ( i.\sqrt{2}.C_{33}+2.C_{23}-i.\sqrt{2}.C_{13}+i.\sqrt{2}.C_{13}^{*}+2.C_{12}^{*}-i.\sqrt{2}.C_{11} ) `
+
+   #. :math:` 0.25 . ( -C_{33}+i.\sqrt{2}.C_{23}+C_{13}+i.\sqrt{2}.C_{23}^{*}+C_{13}^{*}+2.C_{22}-i.\sqrt{2}.C_{12}-i.\sqrt{2}.C_{12}^{*}-C_{11} ) `
+
+   #. :math:` 0.25 . ( 2.C_{33}+2.C_{13}+2.C_{13}^{*}+2.C_{11} ) `
+
+   #. :math:` 0.25 . ( i.\sqrt{2}.C_{33}+i.\sqrt{2}.C_{13}+2.C_{23}^{*}-i.\sqrt{2}.C_{13}^{*}+2.C_{12}-i.\sqrt{2}.C_{11} ) `
+
+   #. :math:` 0.25 . ( C_{33}+i.\sqrt{2}.C_{23}-C_{13}-i.\sqrt{2}.C_{23}^{*}-C_{13}^{*}+2.C_{22}+i.\sqrt{2}.C_{12}-i.\sqrt{2}.C_{12}^{*}+C_{11} ) `
+
+   Where :math:`C_{ij}` are related to the elements of the reciprocal
+   linear covariance matrix.
+
+#. muellertomcovariance (MuellerToReciprocalCovarianceFunctor)
+
+   #. :math:` 0.5.(M_{11}+M_{22}+2.M_{12}) `
+
+   #. :math:` 0.5.\sqrt{2}.[(M_{13}+M_{23}) + j.(M_{14}+M_{24})] `
+
+   #. :math:` -0.5.(M_{33}+M_{44}) - j.M_{34} `
+
+   #. :math:` M_{11}-M_{22} `
+
+   #. :math:` 0.5.\sqrt{2}.[(M_{13}-M_{23}) + j.(M_{14}-M_{24})] `
+
+   #. :math:` 0.5.(M_{11}+M_{22}-2.M_{12}) `
+
+— Bistatic case —
+
+#. bsinclairtocoherency (SinclairToCoherencyMatrixFunctor)
+
+   #. :math:` (S_{hh}+S_{vv}).(S_{hh}+S_{vv})^{*} `
+
+   #. :math:` (S_{hh}+S_{vv}).(S_{hh}-S_{vv})^{*} `
+
+   #. :math:` (S_{hh}+S_{vv}).(S_{hv}+S_{vh})^{*} `
+
+   #. :math:` (S_{hh}+S_{vv}).( j (S_{hv}-S_{vh}))^{*} `
+
+   #. :math:` (S_{hh}-S_{vv}).(S_{hh}-S_{vv})^{*} `
+
+   #. :math:` (S_{hh}-S_{vv}).(S_{hv}+S_{vh})^{*} `
+
+   #. :math:` (S_{hh}-S_{vv}).( j (S_{hv}-S_{vh}))^{*} `
+
+   #. :math:` (S_{hv}+S_{vh}).(S_{hv}+S_{vh})^{*} `
+
+   #. :math:` (S_{hv}+S_{vh}).( j (S_{hv}-S_{vh}))^{*} `
+
+   #. :math:` j (S_{hv}-S_{vh}).( j (S_{hv}-S_{vh}))^{*} `
+
+#. bsinclairtocovariance (SinclairToCovarianceMatrixFunctor)
+
+   #. :math:` S_{hh}.S_{hh}^{*} `
+
+   #. :math:` S_{hh}.S_{hv}^{*} `
+
+   #. :math:` S_{hh}.S_{vh}^{*} `
+
+   #. :math:` S_{hh}.S_{vv}^{*} `
+
+   #. :math:` S_{hv}.S_{hv}^{*} `
+
+   #. :math:` S_{hv}.S_{vh}^{*} `
+
+   #. :math:` S_{hv}.S_{vv}^{*} `
+
+   #. :math:` S_{vh}.S_{vh}^{*} `
+
+   #. :math:` S_{vh}.S_{vv}^{*} `
+
+   #. :math:` S_{vv}.S_{vv}^{*} `
+
+#. bsinclairtocircovariance (SinclairToCircularCovarianceMatrixFunctor)
+
+   #. :math:` S_{ll}.S_{ll}^{*} `
+
+   #. :math:` S_{ll}.S_{lr}^{*} `
+
+   #. :math:` S_{ll}.S_{rl}^{*} `
+
+   #. :math:` S_{ll}.S_{rr}^{*} `
+
+   #. :math:` S_{lr}.S_{lr}^{*} `
+
+   #. :math:` S_{lr}.S_{rl}^{*} `
+
+   #. :math:` S_{lr}.S_{rr}^{*} `
+
+   #. :math:` S_{rl}.S_{rl}^{*} `
+
+   #. :math:` S_{rl}.S_{rr}^{*} `
+
+   #. :math:` S_{rr}.S_{rr}^{*} `
+
+   With:
+
+   -  :math:` S_{ll} = 0.5(S_{hh}+j S_{hv}+j S_{vh}-S_{vv}) `
+
+   -  :math:` S_{lr} = 0.5(j S_{hh}+S_{hv}-S_{vh}+j S_{vv}) `
+
+   -  :math:` S_{rl} = 0.5(j S_{hh}-S_{hv}+ S_{vh}+j S_{vv}) `
+
+   -  :math:` S_{rr} = 0.5(-S_{hh}+j S_{hv}+j S_{vh}+S_{vv}) `
+
+   — Both cases —
+
+#. sinclairtomueller (SinclairToMueller)
+
+   #. :math:` 0.5 Re( T_{xx}.T_{xx}^{*} + T_{xy}.T_{xy}^{*} + T_{yx}.T_{yx}^{*} + T_{yy}.T_{yy}^{*} ) `
+
+   #. :math:` 0.5 Re( T_{xx}.T_{xx}^{*} - T_{xy}.T_{xy}^{*} + T_{yx}.T_{yx}^{*} - T_{yy}.T_{yy}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{xy}^{*} + T_{yx}.T_{yy}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{xy}^{*} + T_{yx}.T_{yy}^{*} ) `
+
+   #. :math:` 0.5 Re( T_{xx}.T_{xx}^{*} + T_{xy}.T_{xy}^{*} - T_{yx}.T_{yx}^{*} - T_{yy}.T_{yy}^{*} ) `
+
+   #. :math:` 0.5 Re( T_{xx}.T_{xx}^{*} - T_{xy}.T_{xy}^{*} - T_{yx}.T_{yx}^{*} + T_{yy}.T_{yy}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{xy}^{*} - T_{yx}.T_{yy}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{xy}^{*} - T_{yx}.T_{yy}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{yx}^{*} + T_{xy}.T_{yy}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{yx}^{*} - T_{xy}.T_{yy}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{yy}^{*} + T_{xy}.T_{yx}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{yy}^{*} - T_{xy}.T_{yx}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{yx}^{*} + T_{xy}.T_{yy}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{yx}^{*} - T_{xy}.T_{yy}^{*} ) `
+
+   #. :math:` Re( T_{xx}.T_{yy}^{*} + T_{xy}.T_{yx}^{*} ) `
+
+   #. :math:` Im( T_{xx}.T_{yy}^{*} - T_{xy}.T_{yx}^{*} ) `
+
+   With :
+
+   -  :math:` T_{xx} = -S_{hh} `
+
+   -  :math:` T_{xy} = -S_{hv} `
+
+   -  :math:` T_{yx} = S_{vh} `
+
+   -  :math:` T_{yy} = S_{vv} `
+
+#. muellertopoldegandpower (MuellerToPolarisationDegreeAndPowerFunctor)
+
+   #. :math:` P_{min} `
+
+   #. :math:` P_{max} `
+
+   #. :math:` DegP_{min} `
+
+   #. :math:` DegP_{max} `
+
+Examples:
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invv imageryC_VV.tif
+                                    -conv msinclairtocoherency
+                                    -outc coherency.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invv imageryC_VV.tif
+                                    -conv msinclairtocovariance
+                                    -outc covariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invv imageryC_VV.tif
+                                    -conv msinclairtocircovariance
+                                    -outc circ_covariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inc coherency.tif 
+                                    -conv mcoherencytomueller
+                                    -outf mueller.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inc covariance.tif 
+                                    -conv mcovariancetocoherencydegree
+                                    -outc coherency_degree.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inc covariance.tif 
+                                    -conv mcovariancetocoherency
+                                    -outc coherency.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inc covariance.tif 
+                                    -conv mlinearcovariancetocircularcovariance
+                                    -outc circ_covariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inf mueller.tif 
+                                    -conv muellertomcovariance
+                                    -outc covariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invh imageryC_VH.tif 
+                                    -invv imageryC_VV.tif
+                                    -conv bsinclairtocoherency
+                                    -outc bcoherency.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invh imageryC_VH.tif 
+                                    -invv imageryC_VV.tif 
+                                    -conv bsinclairtocovariance
+                                    -outc bcovariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invh imageryC_VH.tif 
+                                    -invv imageryC_VV.tif
+                                    -conv bsinclairtocircovariance
+                                    -outc circ_bcovariance.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inhh imageryC_HH.tif 
+                                    -inhv imageryC_HV.tif 
+                                    -invh imageryC_VH.tif 
+                                    -invv imageryC_VV.tif 
+                                    -conv sinclairtomueller
+                                    -outf mueller.tif 
+
+#. ::
+
+       otbcli_SARPolarMatrixConvert -inf mueller.tif 
+                                    -conv muellertopoldegandpower
+                                    -outf degreepower.tif 
+
+Polarimetric decompositions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+From one-band complex images (HH, HV, VH, VV), this application returns
+the selected decomposition. The H-alpha-A decomposition is currently
+the only one available; it is implemented for the monostatic case
+(transmitter and receiver are co-located). The user must provide three
+one-band complex images HH, HV or VH, and VV (HV = VH in the monostatic
+case). The H-alpha-A decomposition consists in averaging 3x3 complex
+coherency matrices (incoherent analysis); the user must provide the
+size of the averaging window through the parameter inco.kernelsize. The
+application returns a float vector image made of three channels:
+H (entropy), Alpha, and A (anisotropy).
+
+Here are the formula used (refer to the previous section about how the
+coherence matrix is obtained from the Sinclair one):
+
+#. :math:` entropy = -\sum_{i=0}^{2} \frac{p[i].\log{p[i]}}{\log{3}} `
+
+#. :math:` \alpha = \sum_{i=0}^{2} p[i].\alpha_{i} `
+
+#. :math:` anisotropy = \frac {SortedEigenValues[1] - SortedEigenValues[2]}{SortedEigenValues[1] + SortedEigenValues[2]} `
+
+Where:
+
+-  :math:` p[i] = max(SortedEigenValues[i], 0) / \sum_{i=0}^{2, SortedEigenValues[i]>0} SortedEigenValues[i] `
+
+-  :math:` \alpha_{i} = \arccos{\left( \left| SortedEigenVector[i][0] \right| \right)} . \frac{180}{\pi}`
+
+Example:
+
+We first extract a ROI from the original image (this step is not
+required). Here imagery\_HH.tif represents the HH element of the
+Sinclair matrix (and so forth).
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_HH.tif -out imagery_HH_extract.tif  
+                         -startx 0 -starty 0
+                         -sizex 1000 -sizey 1000 
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_HV.tif -out imagery_HV_extract.tif  
+                         -startx 0 -starty 0
+                         -sizex 1000 -sizey 1000 
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_VV.tif -out imagery_VV_extract.tif  
+                         -startx 0 -starty 0
+                         -sizex 1000 -sizey 1000 
+
+Next we apply the H-alpha-A decomposition:
+
+::
+
+    otbcli_SARDecompositions -inhh imagery_HH_extract.tif 
+                             -inhv imagery_HV_extract.tif 
+                             -invv imagery_VV_extract.tif 
+                             -decomp haa -inco.kernelsize 5
+                             -out haa_extract.tif 
+
+The result has three bands: entropy (0..1), alpha (0..90) and
+anisotropy (0..1). It is split into 3 mono-band images with the
+following command:
+
+::
+
+    otbcli_SplitImage -in haa_extract.tif -out haa_extract_splitted.tif 
+
+Each image is then colored using the ‘hot’ color look-up table. Notice
+how the minimum and maximum values are provided for each polarimetric
+variable.
+
+-  ::
+
+       otbcli_ColorMapping -in haa_extract_splitted_0.tif 
+                           -method continuous -method.continuous.lut hot 
+                           -method.continuous.min 0 
+                           -method.continuous.max 1
+                           -out entropy_hot.tif uint8 
+
+-  ::
+
+       otbcli_ColorMapping -in haa_extract_splitted_1.tif
+                           -method continuous -method.continuous.lut hot
+                           -method.continuous.min 0
+                           -method.continuous.max 90
+                           -out alpha_hot.tif uint8
+
+-  ::
+
+       otbcli_ColorMapping -in haa_extract_splitted_2.tif 
+                           -method continuous -method.continuous.lut hot 
+                           -method.continuous.min 0 
+                           -method.continuous.max 1
+                           -out anisotropy_hot.tif uint8 
+
+The results are shown in the figures below ([fig:entropyimage],
+[fig:alphaimage] and [fig:anisotropyimage]).
+
+|image| [fig:entropyimage]
+
+|image| [fig:alphaimage]
+
+|image| [fig:anisotropyimage]
+
+Polarimetric synthesis
+~~~~~~~~~~~~~~~~~~~~~~
+
+This application gives, for each pixel, the power that would have been
+received by a SAR system with a basis different from the classical
+(H,V) one (polarimetric synthesis). The new basis is indicated through
+two Jones vectors, defined by the user through the orientation (psi)
+and ellipticity (khi) parameters. These parameters are namely psii,
+khii, psir and khir. The suffixes (i) and (r) refer to the transmitting
+antenna and the receiving antenna respectively. Orientation and
+ellipticity are given in degrees, between -90/90 degrees and -45/45
+degrees respectively.
+
+Four polarization architectures can be processed :
+
+#. HH\_HV\_VH\_VV : full polarization, general bistatic case.
+
+#. HH\_HV\_VV or HH\_VH\_VV : full polarization, monostatic case
+   (transmitter and receiver are co-located).
+
+#. HH\_HV : dual polarization.
+
+#. VH\_VV : dual polarization.
+
+The application takes a complex vector image as input, where each band
+corresponds to a particular emission/reception polarization scheme. The
+user must comply with the band order given above, since the bands are
+used to build the Sinclair matrix.
+
+In order to determine the architecture, the application first relies on
+the number of bands of the input image.
+
+#. Architecture HH\_HV\_VH\_VV is the only one with four bands, there is
+   no possible confusion.
+
+#. Concerning the HH\_HV\_VV and HH\_VH\_VV architectures, both
+   correspond to a three-channel image, but they are processed in the
+   same way, since the Sinclair matrix is symmetric in the monostatic
+   case.
+
+#. Finally, the two last architectures (dual polarization) can’t be
+   distinguished only by the number of bands of the input image. The
+   user must then use the parameters emissionh and emissionv to indicate
+   the architecture of the system: emissionh=1 and emissionv=0 for
+   HH\_HV, emissionh=0 and emissionv=1 for VH\_VV.
+
+Note : if the architecture is HH\_HV, khii and psii are automatically
+set to 0/0 degrees; if the architecture is VH\_VV, khii and psii are
+automatically set to 0/90 degrees.
+
+It is also possible to force the calculation to co-polar or cross-polar
+modes. In the co-polar case, the values of psir and khir are ignored and
+forced to psii and khii; similarly, in the cross-polar mode, psir and
+khir are forced to psii + 90 degrees and -khii.
+
+Finally, the result of the polarimetric synthesis is expressed in the
+power domain, through a one-band scalar image.
+
+The final formula is thus :math:`P=\mid B^T.[S].A\mid^2`, where A and
+B are the two Jones vectors and S is the Sinclair matrix.
+
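+To make the formula concrete, here is a small illustrative Python
+sketch. The construction of the Jones vectors from the orientation psi
+and ellipticity khi uses the usual convention (the vector
+(cos khi, j sin khi) rotated by psi); this convention and the sample
+Sinclair matrix are assumptions made for the illustration, not a
+description of the application’s internals.
+
+::
+
+    import numpy as np
+
+    def jones(psi_deg, khi_deg):
+        """Jones vector from orientation psi and ellipticity khi (usual convention)."""
+        psi, khi = np.radians(psi_deg), np.radians(khi_deg)
+        rotation = np.array([[np.cos(psi), -np.sin(psi)],
+                             [np.sin(psi),  np.cos(psi)]])
+        return rotation @ np.array([np.cos(khi), 1j * np.sin(khi)])
+
+    def synthesized_power(S, psi_i, khi_i, psi_r, khi_r):
+        """P = |B^T . S . A|^2 with A (emission) and B (reception) Jones vectors."""
+        A = jones(psi_i, khi_i)
+        B = jones(psi_r, khi_r)
+        return np.abs(B @ S @ A) ** 2
+
+    # arbitrary Sinclair matrix, synthesized in the LL basis (psi = 0, khi = 45)
+    S = np.array([[1.0 + 0.2j, 0.1 - 0.3j],
+                  [0.1 - 0.3j, 0.8 + 0.5j]])
+    print(synthesized_power(S, 0, 45, 0, 45))
+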
+The two figures below ([fig:polsynthll] and [fig:polsynthlr]) show the
+two images obtained with the bases LL and LR (L for left circular
+polarization and R for right circular polarization), from a Radarsat-2
+image taken over Vancouver, Canada. Once the four two-band images
+imagery\_HH, imagery\_HV, imagery\_VH and imagery\_VV were merged into a
+single four-band complex image imageryC\_HH\_HV\_VH\_VV.tif, the
+following commands were used to produce the LL and LR images:
+
+::
+
+    otbcli_SARPolarSynth -in imageryC_HH_HV_VH_VV.tif
+                         -psii 0 -khii 45 -mode co
+                         -out test-LL.tif
+
+::
+
+    otbcli_SARPolarSynth -in imageryC_HH_HV_VH_VV.tif
+                         -psii 0 -khii 45 -mode cross 
+                         -out test-LR.tif 
+
+The produced images were then rescaled to intensities ranging from 0 to
+255 in order to be displayed.
+
+|image| [fig:polsynthll]
+
+|image| [fig:polsynthlr]
+
+Polarimetric data visualization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Finally, let’s talk about polarimetric data visualization. There is a
+strong link between polarimetric data visualization and the way such
+data can be decomposed into significant physical processes. Indeed, by
+mapping the results (or combinations) of such decompositions to the RGB
+channels, we obtain color compositions that help in interpreting SAR
+polarimetric images.
+
+There is no specific dedicated application yet, but it is possible to
+use a combination of different applications as a replacement. Let’s do
+it with a RADARSAT-2 acquisition over the famous Golden Gate Bridge
+area, in San Francisco, California.
+
+We first make an extract from the original image (not mandatory).
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_HH.tif -out imagery_HH_extract.tif 
+                         -startx 0 -starty 6300 
+                         -sizex 2790 -sizey 2400 
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_HV.tif -out imagery_HV_extract.tif 
+                         -startx 0 -starty 6300 
+                         -sizex 2790 -sizey 2400 
+
+-  ::
+
+       otbcli_ExtractROI -in imagery_VV.tif -out imagery_VV_extract.tif 
+                         -startx 0 -starty 6300 
+                         -sizex 2790 -sizey 2400 
+
+Then we compute the amplitude of each band using the **BandMath**
+application:
+
+-  ::
+
+       otbcli_BandMath -il imagery_HH_extract.tif -out HH.tif 
+                       -exp "sqrt(im1b1^2+im1b2^2)" 
+
+-  ::
+
+       otbcli_BandMath -il imagery_HV_extract.tif -out HV.tif
+                       -exp "sqrt(im1b1^2+im1b2^2)" 
+
+-  ::
+
+       otbcli_BandMath -il imagery_VV_extract.tif -out VV.tif
+                       -exp "sqrt(im1b1^2+im1b2^2)" 
+
+Note that the BandMath application interprets the image
+’imagery\_XX\_extract.tif’ as an image made of two bands, the first one
+holding the real part of the signal and the second one the imaginary
+part (that’s why the modulus is obtained with the expression
+:math:`\sqrt{im1b1^2+im1b2^2}`).
+
+Then, we rescale the produced images to intensities ranging from 0 to
+255:
+
+-  ::
+
+       otbcli_Rescale -in HH.tif -out HH_res.png uint8 
+
+-  ::
+
+       otbcli_Rescale -in HV.tif -out HV_res.png uint8 
+
+-  ::
+
+       otbcli_Rescale -in VV.tif -out VV_res.png uint8 
+
+The figures below ([fig:hhfrisco], [fig:hvfrisco] and [fig:vvfrisco])
+show the images obtained:
+
+|image| [fig:hhfrisco]
+
+|image| [fig:hvfrisco]
+
+|image| [fig:vvfrisco]
+
+Now comes the most interesting step. In order to get a friendly
+coloration of these data, we are going to use the Pauli decomposition,
+defined as follows:
+
+-  :math:`a=\frac{|S_{HH}-S_{VV}|}{\sqrt{2}}`
+
+-  :math:`b=\sqrt{2}.|S_{HV}|`
+
+-  :math:`c=\frac{|S_{HH}+S_{VV}|}{\sqrt{2}}`
+
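+For reference, the three Pauli components can also be written directly
+in Python; this is just a sketch of the formulas above (with the sqrt(2)
+factors kept), not the BandMath pipeline used below.
+
+::
+
+    import numpy as np
+
+    def pauli_components(s_hh, s_hv, s_vv):
+        """Return the (a, b, c) Pauli amplitudes from complex Sinclair elements."""
+        a = np.abs(s_hh - s_vv) / np.sqrt(2)   # double-bounce dominant
+        b = np.sqrt(2) * np.abs(s_hv)          # volume scattering dominant
+        c = np.abs(s_hh + s_vv) / np.sqrt(2)   # single-bounce dominant
+        return a, b, c
+
+    print(pauli_components(1.0 + 0.5j, 0.2 - 0.1j, 0.8 - 0.3j))
+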
+We use the BandMath application again:
+
+-  ::
+
+       otbcli_BandMath -il imagery_HH_extract.tif imagery_HV_extract.tif
+                           imagery_VV_extract.tif 
+                       -out Channel1.tif 
+                       -exp "sqrt(((im1b1-im3b1)^2+(im1b2-im3b2)^2))" 
+
+-  ::
+
+       otbcli_BandMath -il imagery_HH_extract.tif imagery_HV_extract.tif 
+                       imagery_VV_extract.tif 
+                       -out Channel2.tif 
+                       -exp "sqrt(im2b1^2+im2b2^2)" 
+
+-  ::
+
+       otbcli_BandMath -il imagery_HH_extract.tif imagery_HV_extract.tif
+                           imagery_VV_extract.tif
+                       -out Channel3.tif
+                       -exp "sqrt(((im1b1+im3b1)^2+(im1b2+im3b2)^2))"
+
+Note that the sqrt(2) factors have been purposely omitted, since their
+effects will be canceled by the rescaling step. Then, we rescale the
+produced images to intensities ranging from 0 to 255:
+
+-  ::
+
+       otbcli_Rescale -in Channel1.tif -out Channel1_res.tif uint8 
+
+-  ::
+
+       otbcli_Rescale -in Channel2.tif -out Channel2_res.tif uint8 
+
+-  ::
+
+       otbcli_Rescale -in Channel3.tif -out Channel3_res.tif uint8 
+
+And finally, we merge the three bands into a single RGB image.
+
+::
+
+    otbcli_ConcatenateImages -il Channel1_res.tif Channel2_res.tif Channel3_res.tif
+                             -out visuPauli.png
+
+The result is shown in the figure below ([fig:colorfrisco]).
+
+|image| [fig:colorfrisco]
+
+.. |image| image:: ../Art/SARImages/S1-VV-extract-int.png
+.. |image| image:: ../Art/SARImages/S1-VV-despeckled-extract.png
+.. |image| image:: ../Art/SARImages/sarpol_conversion_schema.png
+.. |image| image:: ../Art/SARImages/entropyhot.png
+.. |image| image:: ../Art/SARImages/alphahot.png
+.. |image| image:: ../Art/SARImages/anisotropyhot.png
+.. |image| image:: ../Art/SARImages/test-left-co-2.png
+.. |image| image:: ../Art/SARImages/test-left-cross-2.png
+.. |image| image:: ../Art/SARImages/RSAT2_HH_Frisco.png
+.. |image| image:: ../Art/SARImages/RSAT2_HV_Frisco.png
+.. |image| image:: ../Art/SARImages/RSAT2_VV_Frisco.png
+.. |image| image:: ../Art/SARImages/visuPauli.png
diff --git a/Documentation/Cookbook/rst/stereo.rst b/Documentation/Cookbook/rst/stereo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9984b065076a2ae5787dd770f9d50250c9524d1a
--- /dev/null
+++ b/Documentation/Cookbook/rst/stereo.rst
@@ -0,0 +1,502 @@
+Stereoscopic reconstruction from VHR optical images pair
+========================================================
+
+This section describes how to convert a pair of stereo images into
+elevation information.
+
+The standard workflow for terrain reconstruction with the available
+applications contains the following steps:
+
+-  Estimation of the displacement grids for the epipolar geometry
+   transformation
+
+-  Epipolar resampling of the image pair using those grids
+
+-  Dense disparity map estimation
+
+-  Projection of the disparities on a Digital Surface Model (DSM)
+
+Let’s go to the third dimension!
+
+Estimate epipolar geometry transformation
+-----------------------------------------
+
+The aim of this application is to generate resampling grids used to
+transform images into epipolar geometry. Epipolar geometry is the
+geometry of stereo
+vision (see `here <http://en.wikipedia.org/wiki/Epipolar_geometry>`__).
+The operation of stereo rectification determines transformations to
+apply to each image such that pairs of conjugate epipolar lines become
+collinear, parallel to one of the image axes and aligned. In this
+geometry, the objects present on a given row of the left image are also
+located on the same line in the right image.
+
+Applying this transformation reduces the problem of elevation
+estimation (or stereo correspondence determination) to a 1-D problem.
+We have two images image1 and image2 over the same area (the stereo
+pair) and we assume that we know the localization functions (forward
+and inverse) associated with each of these images.
+
+The forward function goes from the image reference frame to the
+geographic one:
+
+.. math:: (long,lat) = f^{forward}_{image1}(i,j,h)
+
+where h is the elevation hypothesis, :math:`(i,j)` are the pixel
+coordinates in image1 and (long,lat) are geographic coordinates. As you
+can imagine, the inverse function goes from geographic coordinates back
+to image coordinates.
+
+For the second image, the inverse function is then:
+
+.. math:: (i,j) = f^{inverse}_{image2}(long,lat,h)
+
+Using jointly the forward and inverse functions from the image pair, we
+can construct a co-localization function
+:math:`f_{image1 \rightarrow image2}` between the position of a pixel in
+the first and its position in the second one:
+
+.. math:: (i_{image2},j_{image2}) = f_{image1 \rightarrow image2} (i_{image1} , j_{image1} , h)
+
+The expression of this function is:
+
+.. math:: f_{image1 \rightarrow image2} (i_{image1} , j_{image1} , h) = f^{inverse}_{image2}\left( f^{forward}_{image1}(i_{image1} , j_{image1} , h) \right)
+
+The exact expression is not really important. What we need to
+understand is that if we are able to determine, for a given pixel in
+image1, the corresponding pixel in image2, then, since we know the
+co-localization function between both images, we can recover by
+identification the elevation (the variable h in the equation)!
+
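+To make this identification step concrete, here is a minimal Python
+sketch. The callables forward_image1 and inverse_image2 are hypothetical
+stand-ins for the two localization functions; given a matched pair of
+pixels, a simple 1-D search over h returns the elevation at which the
+co-localization function maps the pixel of image1 onto the observed
+pixel of image2.
+
+::
+
+    import numpy as np
+
+    def colocate(i1, j1, h, forward_image1, inverse_image2):
+        """Co-localization f_image1->image2 built from the two localization functions."""
+        long_, lat = forward_image1(i1, j1, h)
+        return inverse_image2(long_, lat, h)
+
+    def estimate_elevation(i1, j1, i2_obs, j2_obs,
+                           forward_image1, inverse_image2,
+                           h_min=-100.0, h_max=1000.0, steps=2000):
+        """Brute-force 1-D search for the elevation that best explains the match."""
+        candidates = np.linspace(h_min, h_max, steps)
+        errors = []
+        for h in candidates:
+            i2, j2 = colocate(i1, j1, h, forward_image1, inverse_image2)
+            errors.append(np.hypot(i2 - i2_obs, j2 - j2_obs))
+        return candidates[int(np.argmin(errors))]
+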
+We now have the mathematical basis to understand how 3-D information can
+be extracted by examination of the relative positions of objects in the
+two 2-D epipolar images.
+
+The construction of the two epipolar grids is a little bit more
+complicated in the case of VHR optical images. That is because most
+passive spaceborne remote sensing systems use a push-broom sensor, which
+corresponds to a line of detectors arranged perpendicularly to the
+flight direction of the spacecraft. This acquisition configuration
+implies a slightly different strategy for stereo-rectification (see
+`here <http://en.wikipedia.org/wiki/Epipolar_geometry#Epipolar_geometry_of_pushbroom_sensor>`__).
+
+We will now use the StereoRectificationGridGenerator application to
+produce two **deformation grids** used to resample the two images in
+epipolar geometry.
+
+::
+
+    otbcli_StereoRectificationGridGenerator -io.inleft image1.tif
+                                            -io.inright image2.tif
+                                            -epi.elevation.avg.value 50
+                                            -epi.step 5
+                                            -io.outleft outimage1_grid.tif
+                                            -io.outright outimage2_grid.tif
+
+The application estimates the displacement to apply to each pixel in
+both input images to obtain epipolar geometry. The application accepts a
+‘step’ parameter to estimate displacements on a coarser grid. Here we
+estimate the displacements every 5 pixels. This is because, in most
+cases with a VHR pair and a small angle between the two images, this
+grid is very smooth. Moreover, the implementation is not *streamable*
+and potentially uses a lot of memory. Therefore it is generally a good
+idea to estimate the displacement grid at a coarser resolution.
+
+The application outputs the size of the output images in epipolar
+geometry. **Note these values**: we will use them in the next step to
+resample the two images in epipolar geometry.
+
+In our case, we have:
+
+::
+
+    Output parameters value:
+    epi.rectsizex: 4462
+    epi.rectsizey: 2951
+    epi.baseline:  0.2094
+
+The epi.baseline parameter provides the mean value (in
+:math:`pixels.meters^{-1}`) of the baseline to sensor altitude ratio. It
+can be used to convert disparities to physical elevation, since a
+disparity of this value will correspond to an elevation offset of one
+meter with respect to the mean elevation.
+
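+In other words, converting a disparity (in pixels) into an elevation
+offset (in meters) is simply a division by this ratio. A quick check
+with the value above (the disparity of 3 pixels is an arbitrary
+example):
+
+::
+
+    baseline_ratio = 0.2094              # epi.baseline, in pixels per meter
+    disparity = 3.0                      # arbitrary measured disparity, in pixels
+    print(disparity / baseline_ratio)    # about 14.3 meters above the mean elevation
+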
+We can now move on to the resampling in epipolar geometry.
+
+Resample images in epipolar geometry
+------------------------------------
+
+The previous application generates two displacement grids. The
+GridBasedImageResampling application then allows us to resample the two
+input images in epipolar geometry using these grids. The grids are
+intermediate results, not really useful on their own in most cases. This
+second step *only* consists in applying the transformation and
+resampling both images. This application can obviously be used in a lot
+of other contexts.
+
+The two commands to generate epipolar images are:
+
+::
+
+    otbcli_GridBasedImageResampling -io.in image1.tif
+                                    -io.out image1_epipolar.tif
+                                    -grid.in outimage1_grid.tif
+                                    -out.sizex 4462
+                                    -out.sizey 2951
+
+::
+
+    otbcli_GridBasedImageResampling -io.in image2.tif
+                                    -io.out image2_epipolar.tif
+                                    -grid.in outimage2_grid.tif
+                                    -out.sizex 4462
+                                    -out.sizey 2951
+
+As you can see, we set the *out.sizex* and *out.sizey* parameters using
+the output values given by the previous application, in order to set the
+size of the output epipolar images.
+
+|image| |image| [fig:EpipolarImages]
+
+We obtain two images in epipolar geometry, as shown in
+figure [fig:EpipolarImages]. Note that the application allows resampling
+only a part of the image using the *-out.ulx* and *-out.uly*
+parameters.
+
+Disparity estimation: Block matching along epipolar lines
+---------------------------------------------------------
+
+Finally, we can begin the stereo correspondences lookup process!
+
+Things are becoming a little bit more complex but do not worry. First,
+we will describe the power of the application.
+
+The resampling of our images in epipolar geometry allows us to constrain
+the search along a 1-dimensional line instead of both dimensions. What
+is even more important is that the disparities along the lines, i.e. the
+offsets along the lines measured by the block-matching process, can be
+directly linked to the local elevation.
+
+An almost complete spectrum of stereo correspondence algorithms has been
+published and it is still growing at a significant rate! See for example
+`this overview <http://en.wikipedia.org/wiki/Block-matching_algorithm>`__.
+The BlockMatching application implements different strategies for block
+matching:
+
+-  Sum of Square Distances block-matching (SSD)
+
+-  Normalized Cross-Correlation (NCC)
+
+-  Lp pseudo-norm (LP)
+
+Another important parameter (mandatory in the application!) is the range
+of disparities. In theory, the block matching could perform a blind
+exploration and search within an infinite range of disparities between
+the images of the stereo pair. We now need to evaluate the range of
+disparities where the block matching will be performed (in the general
+case, from the deepest point on Earth, `the Challenger
+Deep <http://en.wikipedia.org/wiki/Challenger_Deep>`__, to the Everest
+summit!).
+
+We deliberately exaggerate, but you can imagine that, without a smaller
+range, the block matching algorithm can take a lot of time. That is why
+these parameters are mandatory for the application and, as a
+consequence, we need to estimate them manually. This is pretty simple
+using the two epipolar images.
+
+In our case, we take one point on a *flat* area. The image coordinate in
+:math:`image_{1}` is :math:`[1970,1525]` and in :math:`image_{2}` is
+:math:`[1970,1526]`. We then select a second point on a higher region
+(in our case a point near the top of the Pyramid of Cheops!). The image
+coordinate of this pixel in :math:`image_{1}` is :math:`[1661,1299]` and
+in :math:`image_{2}` is :math:`[1633,1300]`. So you see for the
+horizontal exploration, we must set the minimum value lower than
+:math:`-30` (the convention for the sign of the disparity range is from
+image1 to image2).
+
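+A quick sanity check of these numbers (assuming the first coordinate is
+the column, i.e. the direction of the epipolar lines) shows where the
+exploration bounds come from:
+
+::
+
+    flat_left,  flat_right = (1970, 1525), (1970, 1526)
+    high_left,  high_right = (1661, 1299), (1633, 1300)
+
+    # horizontal disparity = column in image2 - column in image1
+    print(flat_right[0] - flat_left[0])   # 0   : flat area, almost no disparity
+    print(high_right[0] - high_left[0])   # -28 : elevated area, hence a minimum below -30
+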
+Note that this estimation can be simplified using an external DEM in the
+application. Regarding the vertical disparity, in the first step we said
+that we reduced the problem of 3-D extraction to a 1-D problem, but this
+is not completely true in general cases. There might be small
+disparities in the vertical direction which are due to parallax errors
+(i.e. epipolar lines exhibit a small shift in the vertical direction,
+around 1 pixel). In fact, the exploration is typically smaller along the
+vertical direction of disparities than along the horizontal one. You can
+also estimate them on the epipolar pair (in our case we use a range of
+:math:`-1` to :math:`1`).
+
+Once again, take care of the sign of the minimum and maximum disparities
+(always from image1 to image2).
+
+The command line for the BlockMatching application is:
+
+::
+
+    otbcli_BlockMatching -io.inleft image1_epipolar.tif
+                         -io.inright image2_epipolar.tif
+                         -io.out disparity_map_ncc.tif
+                         -bm.minhd -45
+                         -bm.maxhd 5
+                         -bm.minvd -1
+                         -bm.maxvd 1
+                         -mask.inleft image1_epipolar_mask.tif
+                         -mask.inright image2_epipolar_mask.tif
+                         -io.outmetric 1
+                         -bm.metric ncc
+                         -bm.subpixel dichotomy
+                         -bm.medianfilter.radius 5
+                         -bm.medianfilter.incoherence 2.0
+
+By default, the application creates a two-band image: the horizontal and
+vertical disparities.
+
+The application gives access to a lot of other powerful functionalities
+to improve the quality of the output disparity map.
+
+Here are a few of these functionalities:
+
+-  -io.outmetric: if the optimal metric values image is activated, it
+   will be concatenated to the output image (which will then have three
+   bands: horizontal disparity, vertical disparity and metric value)
+
+-  -bm.subpixel: Perform sub-pixel estimation of disparities
+
+-  -mask.inleft and -mask.inright: you can specify a no-data value which
+   will discard pixels with this value (for example, the epipolar
+   resampling can generate large parts of the images with black pixels).
+   These masks can be easily generated with the BandMath application:
+
+   ::
+
+       otbcli_BandMath -il image1_epipolar.tif
+                       -out image1_epipolar_mask.tif
+                       -exp "if(im1b1<=0,0,255)"
+
+   ::
+
+       otbcli_BandMath -il image2_epipolar.tif
+                       -out image2_epipolar_mask.tif
+                       -exp "if(im1b1<=0,0,255)"
+
+-  -mask.variancet: the block matching algorithm has difficulty finding
+   matches in uniform areas. We can use this variance threshold to
+   discard those regions and speed up computation.
+
+-  -bm.medianfilter.radius 5 and -bm.medianfilter.incoherence 2.0:
+   Applies a median filter to the disparity map. The median filter
+   belongs to the family of nonlinear filters. It is used to smooth an
+   image without being biased by outliers or shot noise. The radius
+   corresponds to the neighbourhood where the median value is computed.
+   A detection of incoherence between the input disparity map and the
+   median-filtered one is performed (a pixel corresponds to an
+   incoherence if the absolute value of the difference between the pixel
+   value in the disparity map and in the median image is higher than the
+   incoherence threshold, whose default value is 1). Both parameters
+   must be defined in the application to activate the filter.
+
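+The incoherence test described in the last item can be sketched as
+follows (an illustration of the criterion with numpy and scipy, not the
+application’s actual implementation):
+
+::
+
+    import numpy as np
+    from scipy.ndimage import median_filter
+
+    def incoherence_mask(disparity, radius=5, threshold=2.0):
+        """True where the disparity differs too much from its median-filtered version."""
+        filtered = median_filter(disparity, size=2 * radius + 1)
+        return np.abs(disparity - filtered) > threshold
+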
+Of course all these parameters can be combined to improve the disparity
+map.
+
+|image| |image| [fig:DisparityMetric]
+
+From disparity to Digital Surface Model
+---------------------------------------
+
+Using the previous application, we evaluated disparities between images.
+The next (and last!) step is now to transform the disparity map into an
+elevation information to produce an elevation map. It uses as input the
+disparity maps (horizontal and vertical) to produce a Digital Surface
+Model (DSM) with a regular sampling. The elevation values is computed
+from the triangulation of the “left-right” pairs of matched pixels. When
+several elevations are available on a DSM cell, the highest one is kept.
+
+First, an important point is that it is often a good idea to rework the
+disparity map given by the application to only keep relevant
+disparities. For this purpose, we can use the output optimal metric
+image and filter disparities with respect to this value.
+
+For example, if we used Normalized Cross-Correlation (NCC), we can keep
+only disparities where the optimal metric value is above :math:`0.9`.
+Disparities below this value can be considered inaccurate and will not
+be used to compute elevation information (the *-io.mask* parameter can
+be used for this purpose).
+
+This filtering can be easily done with the BandMath application.
+
+We first filter disparities according to their optimal metric value:
+
+::
+
+    otbcli_BandMath -il disparity_map_ncc.tif
+                    -out thres_hdisparity.tif uint8
+                    -exp "if(im1b3>0.9,255,0)"
+
+Then, we concatenate the thresholded disparities using the
+ConcatenateImages application:
+
+::
+
+    otbcli_ConcatenateImages -il thres_hdisparity.tif thres_vdisparity.tif
+                             -out thres_hvdisparity.tif
+
+Now, we can use the DisparityMapToElevationMap application to compute
+the elevation map from the filtered disparity maps.
+
+::
+
+    otbcli_DisparityMapToElevationMap -io.in disparity_map_ncc.tif
+                                      -io.left image1.tif
+                                      -io.right image2.tif
+                                      -io.lgrid outimage1_grid.tif
+                                      -io.rgrid outimage2_grid.tif
+                                      -io.mask thres_hdisparity.tif
+                                      -io.out disparity_map_ssd_to_elevation.tif
+                                      -hmin 10
+                                      -hmax 400
+                                      -elev.default 50
+
+The application produces the elevation map projected in WGS84 (EPSG
+code 4326) over the ground area covered by the stereo pair. Pixel values
+are expressed in meters.
+
+|image| [fig:stereo_out]
+
+This is it! Figure [fig:stereo_out] shows the output DEM from the
+Cheops pair.
+
+One application to rule them all: the multi-stereo framework
+-----------------------------------------------------------------
+
+A dedicated application has been added to fuse one or several stereo
+reconstructions using an all-in-one approach: it computes the DSM from
+one or several stereo pairs. First of all, the user has to choose the
+input data and define the stereo couples using the *-input.co* string
+parameter. This parameter uses the following formatting convention:
+“:math:`index_{0}` :math:`index_{1}`, :math:`index_{2}`
+:math:`index_{3}`, …”, which will create a first couple with images
+:math:`index_{0}` and :math:`index_{1}`, a second with images
+:math:`index_{2}` and :math:`index_{3}`, and so on. If left blank,
+images are processed by pairs (which is equivalent to using
+“0 1,2 3,4 5” …).
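+
+To illustrate the pairing convention only, here is a small Python helper
+(parse_couples is purely hypothetical and not part of OTB) that mirrors
+the behaviour described above:
+
+::
+
+    def parse_couples(co, n_images):
+        """Mirror the '-input.co' convention: comma-separated couples of image indices."""
+        if not co.strip():
+            # default behaviour: images are processed by pairs, e.g. "0 1,2 3,4 5"
+            return [(i, i + 1) for i in range(0, n_images - 1, 2)]
+        return [tuple(int(k) for k in group.split()) for group in co.split(",")]
+
+    print(parse_couples("0 1,2 3", 4))   # [(0, 1), (2, 3)]
+    print(parse_couples("", 6))          # [(0, 1), (2, 3), (4, 5)]
+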
+In addition to the usual elevation and projection parameters, the main
+parameters are split into the groups detailed below:
+
+Output :
+    output parameters: DSM resolution, NoData value, Cell Fusion
+    method.
+
+    -  : output projection map selection.
+
+    -  : Spatial Sampling Distance of the output DSM in meters
+
+    -  : DSM empty cells are filled with this float value (-32768 by
+       default)
+
+    -  : Choice of fusion strategy in each DSM cell (max, min, mean,
+       acc)
+
+    -  : Output DSM
+
+    -  : Output DSM extent choice
+
+Stereorect :
+    Direct and inverse stereorectification grid subsampling parameters
+
+    -  : Step of the direct deformation grid (in pixels)
+
+    -  : Sub-sampling of the inverse epipolar grid
+
+BM :
+    Block Matching parameters.
+
+    -  : Block-matching metric choice (robust SSD, SSD, NCC, Lp Norm)
+
+    -  : Radius of blocks for matching filter (in pixels, :math:`2` by
+       default)
+
+    -  : Minimum altitude below the selected elevation source (in
+       meters, -20.0 by default)
+
+    -  : Maximum altitude above the selected elevation source (in
+       meters, 20.0 by default)
+
+Postproc :
+    Post-Processing parameters
+
+    -  : use bijection consistency. The right-to-left correlation is
+       computed to validate the left-to-right disparities. If the
+       bijection is not found, the pixel is rejected.
+
+    -  : use median disparities filtering (disabled by default)
+
+    -  : use the block matching metric output to discard pixels with a
+       low correlation value (disabled by default, float value)
+
+Mask :
+    Compute optional intermediate masks.
+
+    -  : Mask for left input image (must have the same size for all
+       couples)
+
+    -  : Mask for right input image (must have the same size for all
+       couples)
+
+    -  : this parameter allows discarding pixels whose local variance is
+       too small. The size of the neighborhood is given by the radius
+       parameter (disabled by default).
+
+Stereo reconstruction good practices
+------------------------------------
+
+The minimum and maximum altitude parameters (relative to the selected
+elevation source) are used inside the application to derive the minimum
+and maximum horizontal disparity exploration, so they have a critical
+impact on computation time. It is advised to choose an elevation source
+that is not too far from the DSM you want to produce (for instance, an
+SRTM elevation model). Therefore, the altitude from your elevation
+source will be already taken into account in the epipolar geometry and
+the disparities will reveal the elevation offsets (such as buildings).
+It allows you to use a smaller exploration range along the elevation
+axis, causing a smaller exploration along horizontal disparities and
+faster computation.
+
+Some other parameters also have a deep impact on computation time, so
+they have to be chosen carefully when processing large images.
+
+To reduce computation time it is useful to crop all sensor images to the
+same extent. The easiest way to do that is to choose an image as a
+reference, and then apply the ExtractROI application to the other sensor
+images using the fit mode option.
+
+Algorithm outline
+-----------------
+
+The following steps are performed by the application for each sensor
+pair:
+
+-  Compute the epipolar deformation grids from the stereo pair (direct
+   and inverse)
+
+-  Resample into epipolar geometry with BCO interpolator
+
+-  Create masks for each epipolar image : remove black borders and
+   resample input masks
+
+-  Compute horizontal disparities with a block matching algorithm
+
+-  Refine disparities to sub-pixel precision with a dichotomy algorithm
+
+-  Apply an optional Median filter
+
+-  Filter disparities based on the correlation score (optional) and
+   exploration bounds
+
+-  Translate disparities in sensor geometry
+
+-  Convert disparity map to 3D map
+
+Then all 3D maps are fused to produce the DSM with the desired
+geographic or cartographic projection and a parametrizable extent.
+
+.. |image| image:: ../Art/MonteverdiImages/stereo_image1_epipolar.png
+.. |image| image:: ../Art/MonteverdiImages/stereo_image2_epipolar.png
+.. |image| image:: ../Art/MonteverdiImages/stereo_disparity_horizontal.png
+.. |image| image:: ../Art/MonteverdiImages/stereo_disparity_metric.png
+.. |image| image:: ../Art/MonteverdiImages/stereo_dem_zoom.png