Commit a330511b authored by Julien Michel

Merge branch 'develop' into colormap-shader

parents f6f9d6ef 05633327
......@@ -16,7 +16,7 @@ if( MUPARSERX_INCLUDE_DIR )
endif()
find_path( MUPARSERX_INCLUDE_DIR mpParser.h
PATH_SUFFIXES mpParser )
PATH_SUFFIXES mpParser muparserx)
if(EXISTS "${MUPARSERX_INCLUDE_DIR}/mpDefines.h")
file(READ "${MUPARSERX_INCLUDE_DIR}/mpDefines.h" _mpDefines_h_CONTENTS)
......
......@@ -28,6 +28,8 @@ set(OTB_BUILD_SHARED "@BUILD_SHARED_LIBS@")
# List of available OTB modules.
set(OTB_MODULES_ENABLED "@OTB_CONFIG_MODULES_ENABLED@")
set(OTB_APPLICATION_PATH "@OTB_CONFIG_APPLICATION_PATH@")
# Import OTB targets.
set(OTB_CONFIG_TARGETS_FILE "@OTB_CONFIG_TARGETS_FILE@")
if(NOT OTB_TARGETS_IMPORTED@OTB_CONFIG_TARGETS_CONDITION@)
......
......@@ -27,3 +27,11 @@ set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${ITK_REQUIRED_LINK_
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${ITK_REQUIRED_LINK_FLAGS}")
# OpenCV : no USE_FILE defined
#PYTHONPATH
if( OTB_PYTHONPATH )
if(DEFINED ENV{PYTHONPATH})
set(ENV{PYTHONPATH} "${OTB_PYTHONPATH};$ENV{PYTHONPATH}")
endif()
endif()
......@@ -305,6 +305,12 @@ set(OTB_CONFIG_CODE "
set(OTB_MODULES_DIR \"${OTB_MODULES_DIR}\")")
set(OTB_CONFIG_CMAKE_DIR "${OTB_SOURCE_DIR}/CMake")
set(OTB_USE_FILE "${OTB_CONFIG_CMAKE_DIR}/UseOTB.cmake")
if(OTB_WRAP_PYTHON)
set(OTB_CONFIG_CODE "${OTB_CONFIG_CODE}
set(OTB_PYTHONPATH \"${OTB_BINARY_DIR}/${OTB_INSTALL_PYTHON_DIR}\")")
endif()
set(OTB_CONFIG_APPLICATION_PATH "${OTB_BINARY_DIR}/${OTB_INSTALL_APP_DIR}")
set(OTB_CONFIG_TARGETS_CONDITION " AND NOT OTB_BINARY_DIR")
set(OTB_CONFIG_TARGETS_FILE "${OTB_BINARY_DIR}/OTBTargets.cmake")
set(OTB_CONFIG_MODULE_API_FILE "${OTB_SOURCE_DIR}/CMake/OTBModuleAPI.cmake")
......@@ -326,6 +332,11 @@ set(OTB_CONFIG_CODE "${OTB_CONFIG_CODE}
set(OTB_MODULES_DIR \"\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PACKAGE_DIR}/Modules\")")
set(OTB_USE_FILE "\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PACKAGE_DIR}/UseOTB.cmake")
set(OTB_CONFIG_CMAKE_DIR "\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PACKAGE_DIR}")
if(OTB_WRAP_PYTHON)
set(OTB_CONFIG_CODE "${OTB_CONFIG_CODE}
set(OTB_PYTHONPATH \"\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PYTHON_DIR}\")")
endif()
set(OTB_CONFIG_APPLICATION_PATH "\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_APP_DIR}")
set(OTB_CONFIG_TARGETS_CONDITION "")
set(OTB_CONFIG_TARGETS_FILE "\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PACKAGE_DIR}/OTBTargets.cmake")
set(OTB_CONFIG_MODULE_API_FILE "\${OTB_INSTALL_PREFIX}/${OTB_INSTALL_PACKAGE_DIR}/OTBModuleAPI.cmake")
......
#!/bin/sh
export LD_LIBRARY_PATH=@OTB_INSTALL_PREFIX@/lib:$LD_LIBRARY_PATH
export PYTHONPATH=@OTB_INSTALL_PREFIX@/lib:@OTB_PYTHONPATH@:$PYTHONPATH
export OTB_APPLICATION_PATH=@OTB_INSTALL_PREFIX@/lib/otb/applications
@PYTHON_EXECUTABLE@ @CMAKE_SOURCE_DIR@/Scripts/otbGenerateWrappersRstDoc.py
export PYTHONPATH=@OTB_PYTHONPATH@:$PYTHONPATH
export OTB_APPLICATION_PATH=@OTB_APPLICATION_PATH@
@PYTHON_EXECUTABLE@ @CMAKE_SOURCE_DIR@/Scripts/otbGenerateWrappersRstDoc.py -o "$1"
......@@ -430,7 +430,11 @@ def RstHeading(text, delimiter):
def ApplicationToRst(appname):
output = ""
app = otbApplication.Registry.CreateApplication(appname)
app = None
try:
app = otbApplication.Registry.CreateApplication(appname)
except Exception as e:
print e
# TODO: remove this when bug 440 is fixed
app.Init()
output += RstHeading(app.GetDocName(), '^')
......@@ -487,74 +491,108 @@ def GetApplicationTags(appname):
import shutil
def RstPageHeading(text):
def RstPageHeading(text, maxdepth):
output = RstHeading(text, "=") + linesep
output += ".. toctree::" + linesep
output += "\t:maxdepth: 2" + linesep
output += "\t:maxdepth: " + maxdepth + linesep
output += linesep + linesep
return output
def GenerateRstForApplications():
out = ""
blackList = ["TestApplication", "Example"]
appIndexFile = open('Applications.rst', 'w')
appNames = [app for app in otbApplication.Registry.GetAvailableApplications() if app not in blackList]
if not appNames:
print 'No OTB applications available. Please check ITK_AUTOLOAD_PATH env variable'
sys.exit(1)
sectionTags = ["Image Manipulation","Vector Data Manipulation", "Calibration","Geometry", "Image Filtering","Feature Extraction","Stereo","Learning","Segmentation"]
appIndexFile.write(RstPageHeading("Applications"))
for tag in sectionTags:
directory= "Applications/" + tag
if not os.path.exists(directory):
os.makedirs(directory)
tag_ = tag.replace(' ', '_')
appIndexFile.write('\tApplications/' + tag_ + '.rst' + linesep)
#chapterIndexFile = open('Applications/' + tag + '.rst', 'w')
#chapterIndexFile.write(RstPageHeading(tag))
#print linesep + RstHeading(tag, '=')
appsRemoved = []
for appName in appNames:
apptags = GetApplicationTags(appName)
if apptags.count(tag) > 0:
print "Generating " + appName + ".rst"
#chapterIndexFile.write("\t" + tag + '/' + appName + linesep)
appFile = open('Applications/app_' + appName + '.rst', 'w')
out = ApplicationToRst(appName)
appFile.write(out)
appFile.close()
appsRemoved.append(appName)
for appName in appsRemoved:
appNames.remove(appName)
#chapterIndexFile.close()
misctag = "Miscellaneous" #should this be Utilities
if not os.path.exists("Applications/" + misctag):
os.makedirs("Applications/" + misctag)
appIndexFile.write('\tApplications/' + misctag + linesep)
appIndexFile.close()
#miscChapterIndexFile = open("Applications/" + misctag + '.rst', 'w')
#miscChapterIndexFile.write(RstPageHeading(misctag))
blackList = ["TestApplication", "Example", "ApplicationExample"]
allApps = None
try:
allApps = otbApplication.Registry.GetAvailableApplications( )
except Exception as e:
print 'error in otbApplication.Registry.GetAvailableApplications()'
sys.exit(1)
# appNames = [app for app in otbApplication.Registry.GetAvailableApplications() if app not in blackList]
if not allApps:
print 'No OTB applications available. Please check OTB_APPLICATION_PATH env variable'
sys.exit(1)
# sectionTags = ["Image Manipulation",
# "Vector Data Manipulation",
# "Calibration","Geometry", "Image Filtering","Feature Extraction",
# "Stereo","Learning","Segmentation", "Miscellaneous"]
# for tag in sectionTags:
# #directory= "Applications/" + tag
# # if not os.path.exists(directory):
# # os.makedirs(directory)
# appIndexFile.write('\tApplications/' + tag.replace(' ', '_') + '.rst' + linese)
# #chapterIndexFile = open('Applications/' + tag + '.rst', 'w')
# #chapterIndexFile.write(RstPageHeading(tag))
# #print linesep + RstHeading(tag, '=')
#miscFile = open('Applications/Miscellaneous.rst', 'w')
# misctag = "Miscellaneous" #should this be Utilities
# if not os.path.exists("Applications/" + misctag):
# os.makedirs("Applications/" + misctag)
# appIndexFile.write('\tApplications/' + misctag + linesep)
writtenTags = []
appNames = [app for app in allApps if app not in blackList]
print "All apps: %s" % (appNames,)
appIndexFile = open(RST_DIR + '/Applications.rst', 'w')
appIndexFile.write(RstPageHeading("Applications", "2"))
for appName in appNames:
print "Generating " + appName + ".rst"
appFile = open("Applications/app_" + appName + ".rst", 'w')
tags = GetApplicationTags(appName)
if not tags:
print "No tags for application: " + appName
sys.exit(1)
tag = tags[0]
tag_ = tag
if ' ' in tag:
tag_ = tag.replace(' ', '_')
if not tag_:
print 'empty tag found for ' + appName
if tag_ not in writtenTags:
appIndexFile.write('\tApplications/' + tag_ + '.rst' + linesep)
writtenTags.append(tag_)
tagFileName = RST_DIR + '/Applications/' + tag_ + '.rst'
if os.path.isfile(tagFileName):
tagFile = open(tagFileName, 'a')
tagFile.write("\tapp_" + appName + linesep)
tagFile.close()
else:
tagFile = open(tagFileName, 'w')
tagFile.write( RstPageHeading(tag, "1") )
tagFile.write("\tapp_" + appName + linesep)
tagFile.close()
print "Generating " + appName + ".rst" + " on tag " + tag_
appFile = open(RST_DIR + '/Applications/app_' + appName + '.rst', 'w')
out = ApplicationToRst(appName)
appFile.write(out)
appFile.close()
#miscChapterIndexFile.write('\t' + misctag + '/' + appName + linesep)
out = ""
appIndexFile.close()
return out
# Start parsing options
parser = OptionParser(usage="Export application(s) to tex or pdf file.")
parser = OptionParser(usage="Export application(s) to rst file.")
parser.add_option("-a",dest="appname",help="Generate rst only for this application (eg: OrthoRectification)")
parser.add_option("-m",dest="module",help="Generate rst only for this module (eg: Image Manipulation)")
parser.add_option("-o",dest="rstdir",help="directory where rst files are generated")
(options, args) = parser.parse_args()
RST_DIR = options.rstdir
if options.appname is not None:
out = ApplicationToRst(options.appname)
#print out
......
Calibration
===========
.. toctree::
:maxdepth: 1
app_OpticalCalibration
app_SARCalibration
app_SARDecompositions
app_SARPolarMatrixConvert
app_SARPolarSynth
app_SarRadiometricCalibration
Feature Extraction
==================
.. toctree::
:maxdepth: 1
app_BinaryMorphologicalOperation
app_ComputePolylineFeatureFromImage
app_DSFuzzyModelEstimation
app_EdgeExtraction
app_GrayScaleMorphologicalOperation
app_HaralickTextureExtraction
app_HomologousPointsExtraction
app_LineSegmentDetection
app_LocalStatisticExtraction
app_MultivariateAlterationDetector
app_RadiometricIndices
app_SFSTextureExtraction
app_VectorDataDSValidation
Geometry
========
.. toctree::
:maxdepth: 1
app_BundleToPerfectSensor
app_ConvertCartoToGeoPoint
app_ConvertSensorToGeoPoint
app_GeneratePlyFile
app_GenerateRPCSensorModel
app_GridBasedImageResampling
app_ImageEnvelope
app_OrthoRectification
app_Pansharpening
app_RefineSensorModel
app_RigidTransformResample
app_Superimpose
Image Filtering
===============
.. toctree::
:maxdepth: 1
app_Despeckle
app_DimensionalityReduction
app_MeanShiftSmoothing
app_Smoothing
Image Manipulation
==================
.. toctree::
:maxdepth: 1
app_ColorMapping
app_ConcatenateImages
app_Convert
app_DEMConvert
app_DownloadSRTMTiles
app_ExtractROI
app_ManageNoData
app_MultiResolutionPyramid
app_Quicklook
app_ReadImageInfo
app_Rescale
app_SplitImage
app_TileFusion
Learning
========
.. toctree::
:maxdepth: 1
app_ClassificationMapRegularization
app_ComputeConfusionMatrix
app_ComputeImagesStatistics
app_FusionOfClassifications
app_ImageClassifier
app_KMeansClassification
app_MultiImageSamplingRate
app_PolygonClassStatistics
app_PredictRegression
app_SampleExtraction
app_SampleSelection
app_SOMClassification
app_TrainImagesClassifier
app_TrainRegression
app_TrainVectorClassifier
Miscellaneous
=============
.. toctree::
:maxdepth: 1
app_BandMath
app_BandMathX
app_CompareImages
app_HyperspectralUnmixing
app_KmzExport
app_OSMDownloader
app_ObtainUTMZoneFromGeoPoint
app_PixelValue
app_VertexComponentAnalysis
Segmentation
============
.. toctree::
:maxdepth: 1
app_ComputeOGRLayersFeaturesStatistics
app_ConnectedComponentSegmentation
app_HooverCompareSegmentation
app_LSMSSegmentation
app_LSMSSmallRegionsMerging
app_LSMSVectorization
app_OGRLayerClassifier
app_Segmentation
app_TrainOGRLayersClassifier
Stereo
======
.. toctree::
:maxdepth: 1
app_BlockMatching
app_DisparityMapToElevationMap
app_FineRegistration
app_StereoFramework
app_StereoRectificationGridGenerator
Vector Data Manipulation
========================
.. toctree::
:maxdepth: 1
app_ConcatenateVectorData
app_Rasterization
app_VectorDataExtractROI
app_VectorDataReprojection
app_VectorDataSetField
app_VectorDataTransform
......@@ -7,10 +7,6 @@ SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = @RST_BUILD_DIR@
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
......
......@@ -14,7 +14,12 @@
import sys
import os
import sphinx_rtd_theme
HAVE_RTD_THEME=False
try:
import sphinx_rtd_theme
HAVE_RTD_THEME=True
except ImportError:
pass
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
......@@ -31,10 +36,12 @@ import sphinx_rtd_theme
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
]
imgmath_latex='@LATEX_COMMAND@'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
......@@ -104,15 +111,17 @@ pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if HAVE_RTD_THEME:
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
......@@ -123,7 +132,7 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "Art/logo.png"
html_logo = "rst/Art/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
......@@ -165,13 +174,13 @@ html_static_path = ['_static']
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
......
......@@ -14,9 +14,10 @@ GIS/Image processing tools such as GDAL, GRASS GIS, OSSIM that can deal with num
The code below reads an input image using Python Pillow (PIL) and converts it to a numpy array. This numpy array is
used an input to the application set *SetImageFromNumpyArray(...)* method.
The application used in this example is `ExtractROI <../Applications/app_ExtractROI.html>`_. After extracting
a small area the output image is taken as numpy array with *GetImageFromNumpyArray(...)* method
used as an input to the application via the *SetImageFromNumpyArray(...)* method.
The application used in this example is ExtractROI. After extracting
a small area, the output image is retrieved as a numpy array with the *GetImageFromNumpyArray(...)* method, thus avoiding
writing the output to a temporary file.
::
......
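A minimal sketch of this workflow (not the collapsed example itself), assuming the otbApplication Python bindings, numpy and Pillow are available; the method names are taken from the paragraph above, while the file name and ROI sizes are placeholders::

    # Sketch only: exchange image data between Pillow/numpy and an OTB
    # application without going through files on disk.
    import numpy as np
    from PIL import Image
    import otbApplication

    # Read the input image with Pillow and turn it into a numpy array
    array = np.asarray(Image.open("input.png"), dtype=np.float32)

    # Hand the array to ExtractROI instead of an input file
    app = otbApplication.Registry.CreateApplication("ExtractROI")
    app.SetImageFromNumpyArray("in", array)
    app.SetParameterInt("startx", 0)
    app.SetParameterInt("starty", 0)
    app.SetParameterInt("sizex", 100)   # placeholder ROI size
    app.SetParameterInt("sizey", 100)
    app.Execute()

    # Get the result back as a numpy array, avoiding a temporary output file
    roi = app.GetImageFromNumpyArray("out")
    print(roi.shape)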
......@@ -333,9 +333,9 @@ model can’t be observed if the map projection is used. In order to
perform an ortho-rectification on this type of product, the map
projection has to be hidden from **Orfeo Toolbox** .
You can see if a product is an ortho-ready product by using tools such
as ``gdalinfo`` or `ReadImageInfo <../Applications/app_ReadImageInfo.html>`_, and check
if the product verifies the 2 following conditions :
You can see if a product is an ortho-ready product by using ``gdalinfo`` or
the OTB ReadImageInfo application.
Check if your product verifies the following two conditions:
- The product is in raw geometry: you should expect the presence of
RPC coefficients and a non-empty OSSIM keywordlist (see the sketch below).
......
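A minimal sketch of such a check for the first condition, using the GDAL Python bindings (this choice and the file name are assumptions for illustration; ``gdalinfo`` on the command line reports the same metadata)::

    # Sketch only: look for RPC coefficients in the image metadata.
    from osgeo import gdal

    ds = gdal.Open("product.tif")          # placeholder file name
    rpc = ds.GetMetadata("RPC")            # empty dict when no RPC domain
    if rpc:
        print("RPC coefficients found: the product is in raw geometry")
    else:
        print("No RPC coefficients: the product may already be map-projected")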
......@@ -457,12 +457,19 @@ roofs. Data is available in the OTB-Data
and this image is produced with the commands inside this
`file <http://hg.orfeo-toolbox.org/OTB-Applications/file/3ce975605013/Testing/Classification/CMakeLists.txt>`_ .
.. |image_21| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. |image_22| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif_fusion.jpg
.. |image_23| image:: ../Art/MonteverdiImages/classification_chain_fancyclassif.jpg
.. figure:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. figure:: ../Art/MonteverdiImages/classification_chain_fancyclassif_fusion.jpg
.. figure:: ../Art/MonteverdiImages/classification_chain_fancyclassif.jpg
.. _Figure2:
+---------------------------+---------------------------+---------------------------+
| |image_21| | |image_22| | |image_23| |
+---------------------------+---------------------------+---------------------------+
Figure 2: From left to right: original image, result of fusing (in the Monteverdi viewer) the original image with the fancy classification, and input image with the fancy color classification from the labeled image.
Fusion of classification maps
-----------------------------
......@@ -476,8 +483,7 @@ Classifications generates a single more robust and precise
classification map which combines the information extracted from the
input list of labeled images.
The *FusionOfClassifications* application has the following input
parameters :
The *FusionOfClassifications* application has the following input parameters:
- ``-il`` list of input labeled classification images to fuse
......@@ -516,30 +522,45 @@ The application can be used like this:
-out MVFusedClassificationMap.tif
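The same fusion can also be scripted through the Python bindings. The following is only a sketch: the parameter keys (``il``, ``method``, ``out``) and the ``majorityvoting`` value are assumed from the parameter list above rather than stated in this excerpt, and the input file names are placeholders::

    # Sketch only: majority-voting fusion of six classification maps.
    import otbApplication

    app = otbApplication.Registry.CreateApplication("FusionOfClassifications")
    app.SetParameterStringList("il", ["classif_%d.tif" % i for i in range(1, 7)])
    app.SetParameterString("method", "majorityvoting")   # assumed method value
    app.SetParameterString("out", "MVFusedClassificationMap.tif")
    app.ExecuteAndWriteOutput()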
Let us consider 6 independent classification maps of the same input
image (Cf. left image in `Figure 1`) generated from 6 different SVM models.
The `Figure 2` represents them after a color mapping by the same LUT.
image (Cf. left image in Figure2_) generated from 6 different SVM models.
Figure3_ shows them after a color mapping by the same LUT.
Thus, 4 classes (water: blue, roads: gray,vegetation: green,
buildings with red roofs: red) are observable on each of them.
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C1_CM.png
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C2_CM.png
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C3_CM.png
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C4_CM.png
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C5_CM.png
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_C6_CM.png
.. |image_31| image:: ../Art/MonteverdiImages/QB_1_ortho_C1_CM.png
.. |image_32| image:: ../Art/MonteverdiImages/QB_1_ortho_C2_CM.png
.. |image_33| image:: ../Art/MonteverdiImages/QB_1_ortho_C3_CM.png
.. |image_34| image:: ../Art/MonteverdiImages/QB_1_ortho_C4_CM.png
.. |image_35| image:: ../Art/MonteverdiImages/QB_1_ortho_C5_CM.png
.. |image_36| image:: ../Art/MonteverdiImages/QB_1_ortho_C6_CM.png
.. _Figure3:
+---------------------------+---------------------------+---------------------------+
| |image_31| | |image_32| | |image_33| |
+---------------------------+---------------------------+---------------------------+
| |image_34| | |image_35| | |image_36| |
+---------------------------+---------------------------+---------------------------+
Figure 3: Six fancy colored classified images to be fused, generated from 6 different SVM models.
As an example of the *FusionOfClassifications* application by *majority
voting*, the fusion of the six input classification maps represented in
`Figure 3` leads to the classification map illustrated on the right in `Figure 4`.
Figure3_ leads to the classification map illustrated on the right in Figure4_.
Thus, it appears that this fusion highlights the more relevant classes among the six different
input classifications. The white parts of the fused image correspond to
the undecided class labels, i.e. to pixels for which there is not a
unique majority voting.
.. figure:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_MV_C123456_CM.png
.. |image_41| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. |image_42| image:: ../Art/MonteverdiImages/QB_1_ortho_MV_C123456_CM.png
.. _Figure4:
+------------------------------------------------+------------------------------------------------+
| |image_41| | |image_42| |
+------------------------------------------------+------------------------------------------------+
Figure 4: From left to right: Original image, and fancy colored classified image obtained by a majority voting fusion of the 6 classification maps represented in Figure3_ (water: blue, roads: gray, vegetation: green, buildings with red roofs: red, undecided: white).
......@@ -586,16 +607,23 @@ The application can be used like this:
-out DSFusedClassificationMap.tif
As an example of the *FusionOfClassifications* application by *Dempster
Shafer*, the fusion of the six input classification maps represented in
`Figure 3` leads to the classification map illustrated on the right in `Figure 5`
[fig:ClassificationMapFusionApplicationDS]. Thus, it appears that this
fusion gives access to a more precise and robust classification map
Shafer*, the fusion of the six input classification maps represented in Figure3_
leads to the classification map illustrated on the right in Figure5_.
Thus, it appears that this fusion gives access to a more precise and robust classification map
based on the confidence level in each classifier.
.. figure:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. figure:: ../Art/MonteverdiImages/QB_1_ortho_DS_V_P_C123456_CM.png
Figure 5: From left to right: Original image, and fancy colored classified image obtained by a Dempster-Shafer fusion of the 6 classification maps represented in Fig. 4.13 (water: blue, roads: gray, vegetation: green, buildings with red roofs: red, undecided: white).
.. |image_51| image:: ../Art/MonteverdiImages/classification_chain_inputimage.jpg
.. |image_52| image:: ../Art/MonteverdiImages/QB_1_ortho_DS_V_P_C123456_CM.png
.. _Figure5:
+------------------------------------------------+------------------------------------------------+
| |image_51| | |image_52| |
+------------------------------------------------+------------------------------------------------+
Figure 5: From left to right: Original image, and fancy colored classified image obtained by a Dempster-Shafer fusion of the 6 classification maps represented in Figure3_ (water: blue, roads: gray, vegetation: green, buildings with red roofs: red, undecided: white).
Recommendations to properly use the fusion of classification maps
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
......@@ -701,15 +729,14 @@ Example
~~~~~~~
Resulting from the application presented in section :ref:`fancy_classification_results`
and illustrated in Fig. [fig:MeanShiftVectorImageFilter],
the Fig.[fig:ClassificationMapRegularizationApplication] shows a regularization
and illustrated in Figure2_, Figure6_ shows a regularization
of a classification map composed of 4 classes: water, roads, vegetation
and buildings with red roofs. The radius of the ball shaped structuring
element is equal to 3 pixels, which corresponds to a ball included in a
7 x 7 pixels square. Pixels with more than one majority class keep their
original labels.
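For reference, a minimal sketch of how such a regularization could be launched from Python; the parameter keys (``io.in``, ``io.out``, ``ip.radius``) are assumptions not stated in this excerpt, and the file names are placeholders::

    # Sketch only: regularize a classification map with a radius-3 structuring
    # element, as described above. Parameter keys are assumed, not documented here.
    import otbApplication

    app = otbApplication.Registry.CreateApplication("ClassificationMapRegularization")
    app.SetParameterString("io.in", "classification_map.tif")
    app.SetParameterString("io.out", "regularized_map.tif")
    app.SetParameterInt("ip.radius", 3)
    app.ExecuteAndWriteOutput()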