Open CI Scripts: Initial Commit
* build_helper: Python script which builds sets
of configurations from a json file input
* checkpatch: Bash scripts helping with running checkpatch
* cppcheck: Bash script helping with running cppcheck
* lava_helper: Python script which generates a lava job
definition and parses the output of a lava dispatcher
* tfm_ci_pylib: Generic Python module for Open CI
* configs: Directory storing reference configurations
Change-Id: Ibda0cbfeb5b004b35fef3c2af4cb5c012f2672b4
Signed-off-by: Galanakis, Minos <minos.galanakis@linaro.org>
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e736d3b
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+*.pyc
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..646f2a0
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.trustedfirmware.org
+port=29418
+project=ci/tf-m-ci-scripts
diff --git a/build_helper/build_helper.py b/build_helper/build_helper.py
new file mode 100755
index 0000000..ea8e8f3
--- /dev/null
+++ b/build_helper/build_helper.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python3
+
+""" build_helper.py:
+
+ Build helper instantiates a build manager with user provided arguments,
+ or default ones.
+ """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import os
+import sys
+import time
+import argparse
+import datetime
+from build_helper_configs import config_AN521
+
+try:
+ from tfm_ci_pylib.utils import get_cmd_args, load_json
+ from tfm_ci_pylib.tfm_build_manager import TFM_Build_Manager
+except ImportError:
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ sys.path.append(os.path.join(dir_path, "../"))
+ from tfm_ci_pylib.utils import get_cmd_args, load_json
+ from tfm_ci_pylib.tfm_build_manager import TFM_Build_Manager
+
+
+def build(tfm_dir, build_dir, buid_report_f, build_config):
+ """ Instantiate a build manager class and build all configurations """
+
+ start_time = time.time()
+
+ bm = TFM_Build_Manager(tfm_dir=tfm_dir,
+ work_dir=build_dir,
+ cfg_dict=build_config,
+ report=buid_report_f,
+ install=True)
+ bm.start()
+ bm.join()
+ build_report = bm.get_report()
+ elapsed = time.time() - start_time
+ elapsed = str(datetime.timedelta(seconds=elapsed))
+ print("=============== Time Elapsed: %s ===================" % elapsed)
+ return bm.get_status(), build_report
+
+
+def main(user_args):
+ """ Main logic """
+
+ if user_args.config_f:
+ try:
+ build_config = load_json(user_args.config_f)
+ except Exception as e:
+ print("Failed to load config %s. Exception: %s" % (build_config,
+ e.msg))
+ sys.exit(1)
+ else:
+ build_config = config_AN521
+ # Build everything
+ build_status, build_report = build(user_args.tfm_dir,
+ user_args.build_dir,
+ user_args.report,
+ build_config)
+
+ if not build_report:
+ print("Build Report Empty, check build status")
+ sys.exit(1)
+
+ if build_status:
+ print("Build Failed")
+ sys.exit(1)
+ # pprint(build_report)
+ print("Build Complete!")
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+
+    # Calculate the workspace root directory relative to the script location
+ # Equivalent to realpath $(dirname ./build_helper/build_helper.py)/../../
+ root_path = os.path.dirname(os.path.realpath(__file__))
+ for i in range(2):
+ root_path = os.path.split(root_path)[0]
+
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument("-b", "--build_dir",
+ dest="build_dir",
+ action="store",
+ default="./builds",
+ help="Where to generate the artifacts")
+ parser.add_argument("-c", "--config_file",
+ dest="config_f",
+ action="store",
+ help="Manual configuration override file (JSON)")
+ parser.add_argument("-r", "--report",
+ dest="report",
+ action="store",
+ help="JSON file containing build report")
+ parser.add_argument("-t", "--tfm_dir",
+ dest="tfm_dir",
+ action="store",
+ default=os.path.join(root_path, "tf-m"),
+ help="TFM directory")
+
+ main(get_cmd_args(parser=parser))
diff --git a/build_helper/build_helper_configs.py b/build_helper/build_helper_configs.py
new file mode 100644
index 0000000..b2976de
--- /dev/null
+++ b/build_helper/build_helper_configs.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+
+""" builtin_configs.py:
+
+ Default configuration files used as reference """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+
+# Configure build manager to build several combinations
+config_AN521 = {"platform": ["AN521"],
+ "compiler": ["GNUARM"],
+ "config": ["ConfigRegression",
+ "ConfigDefault",
+ "ConfigCoreTest"],
+ "build": ["Debug"],
+ "with_mcuboot": [True],
+                # invalid configurations can be added as tuples of adjustable
+ # resolution "AN521" will reject all combinations for that
+ # platform while ("AN521", "GNUARM") will only reject GCC ones
+ "invalid": []
+ }
+
+_builtin_configs = {"AN521_gnuarm_Config_DRC": config_AN521}
+
+if __name__ == '__main__':
+ import os
+ import sys
+ try:
+ from tfm_ci_pylib.utils import export_config_map
+ except ImportError:
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ sys.path.append(os.path.join(dir_path, "../"))
+ from tfm_ci_pylib.utils import export_config_map
+
+ if len(sys.argv) == 2:
+ if sys.argv[1] == "--export":
+ export_config_map(_builtin_configs)
+ if len(sys.argv) == 3:
+ if sys.argv[1] == "--export":
+ export_config_map(_builtin_configs, sys.argv[2])
diff --git a/checkpatch/checkpatch.conf b/checkpatch/checkpatch.conf
new file mode 100644
index 0000000..e49ba0f
--- /dev/null
+++ b/checkpatch/checkpatch.conf
@@ -0,0 +1,20 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+--terse
+--no-tree
+--ignore COMPLEX_MACRO
+--ignore AVOID_EXTERNS
+--ignore VOLATILE
+--ignore PREFER_KERNEL_TYPES
+--ignore LEADING_SPACE
+--ignore CODE_INDENT
+--ignore SUSPECT_CODE_INDENT
+--ignore BRACES
+--ignore CONST_STRUCT
+--ignore INITIALISED_STATIC
+--show-types
diff --git a/configs/readme.md b/configs/readme.md
new file mode 100644
index 0000000..7f65d41
--- /dev/null
+++ b/configs/readme.md
@@ -0,0 +1,23 @@
+## This folder should be used to export JSON configuration for ci scripts
+
+Configuration classes can be called to export their supported configuration map
+to JSON files. Those files can be used to directly control the CI through the
+corresponding text fields.
+
+Reference configuration files can be created when calling a _config module
+with --export command, followed by an optional target directory
+
+`python xxx_config.py --export (out_dir)`
+
+If out_dir is not defined configuration will be exported in current working
+directory.
+
+At release date the system will generate two configuration files for building
+and testing AN521 reference platform
+
+~~~~~
+python lava_helper/lava_helper_configs.py --export ./configs
+python build_helper/build_helper_configs.py --export ./configs
+~~~~~
+
+*Copyright (c) 2018-2019, Arm Limited. All rights reserved.*
diff --git a/cppcheck/arm-cortex-m.cfg b/cppcheck/arm-cortex-m.cfg
new file mode 100644
index 0000000..5da59d6
--- /dev/null
+++ b/cppcheck/arm-cortex-m.cfg
@@ -0,0 +1,89 @@
+<?xml version="1.0"?>
+<!--
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+-->
+<def format="2">
+ <!-- Minimum-width integer types -->
+ <podtype name="int_least8_t" sign="s" size="1"/>
+ <podtype name="uint_least8_t" sign="u" size="1"/>
+ <podtype name="int_least16_t" sign="s" size="2"/>
+ <podtype name="uint_least16_t" sign="u" size="2"/>
+ <podtype name="int_least32_t" sign="s" size="4"/>
+ <podtype name="uint_least32_t" sign="u" size="4"/>
+ <podtype name="int_least64_t" sign="s" size="8"/>
+ <podtype name="uint_least64_t" sign="u" size="8"/>
+ <!-- Greatest-width integer types -->
+ <podtype name="intmax_t" sign="s" size="8"/>
+ <podtype name="uintmax_t" sign="u" size="8"/>
+ <!-- inttypes.h -->
+ <podtype name="intptr_t" sign="s" size="4"/>
+ <podtype name="uintptr_t" sign="u" size="4"/>
+ <define name="INT8_MAX" value="0x7f"/>
+ <define name="INT8_MIN" value="(-INT8_MAX - 1)"/>
+ <define name="UINT8_MAX" value="(__CONCAT(INT8_MAX, U) * 2U + 1U)"/>
+ <define name="INT16_MAX" value="0x7fff"/>
+ <define name="INT16_MIN" value="(-INT16_MAX - 1)"/>
+ <define name="UINT16_MAX" value="(__CONCAT(INT16_MAX, U) * 2U + 1U)"/>
+ <define name="INT32_MAX" value="0x7fffffffL"/>
+ <define name="INT32_MIN" value="(-INT32_MAX - 1L)"/>
+ <define name="UINT32_MAX" value="(__CONCAT(INT32_MAX, U) * 2UL + 1UL)"/>
+ <define name="INT64_MAX" value="0x7fffffffffffffffLL"/>
+ <define name="INT64_MIN" value="(-INT64_MAX - 1LL)"/>
+ <define name="UINT64_MAX" value="(__CONCAT(INT64_MAX, U) * 2ULL + 1ULL)"/>
+ <!-- Limits of minimum-width integer types -->
+ <define name="INT_LEAST8_MAX" value="INT8_MAX"/>
+ <define name="INT_LEAST8_MIN" value="INT8_MIN"/>
+ <define name="UINT_LEAST8_MAX" value="UINT8_MAX"/>
+ <define name="INT_LEAST16_MAX" value="INT16_MAX"/>
+ <define name="INT_LEAST16_MIN" value="INT16_MIN"/>
+ <define name="UINT_LEAST16_MAX" value="UINT16_MAX"/>
+ <define name="INT_LEAST32_MAX" value="INT32_MAX"/>
+ <define name="INT_LEAST32_MIN" value="INT32_MIN"/>
+ <define name="UINT_LEAST32_MAX" value="UINT32_MAX"/>
+ <define name="INT_LEAST64_MAX" value="INT64_MAX"/>
+ <define name="INT_LEAST64_MIN" value="INT64_MIN"/>
+ <define name="UINT_LEAST64_MAX" value="UINT64_MAX"/>
+ <!-- Limits of fastest minimum-width integer types -->
+ <define name="INT_FAST8_MAX" value="INT8_MAX"/>
+ <define name="INT_FAST8_MIN" value="INT8_MIN"/>
+ <define name="UINT_FAST8_MAX" value="UINT8_MAX"/>
+ <define name="INT_FAST16_MAX" value="INT16_MAX"/>
+ <define name="INT_FAST16_MIN" value="INT16_MIN"/>
+ <define name="UINT_FAST16_MAX" value="UINT16_MAX"/>
+ <define name="INT_FAST32_MAX" value="INT32_MAX"/>
+ <define name="INT_FAST32_MIN" value="INT32_MIN"/>
+ <define name="UINT_FAST32_MAX" value="UINT32_MAX"/>
+ <define name="INT_FAST64_MAX" value="INT64_MAX"/>
+ <define name="INT_FAST64_MIN" value="INT64_MIN"/>
+ <define name="UINT_FAST64_MAX" value="UINT64_MAX"/>
+ <!-- Limits of integer types capable of holding object pointers -->
+ <define name="INTPTR_MAX" value="INT32_MAX"/>
+ <define name="INTPTR_MIN" value="INT32_MIN"/>
+ <define name="UINTPTR_MAX" value="UINT32_MAX"/>
+ <!-- Limits of greatest-width integer types -->
+ <define name="INTMAX_MAX" value="INT64_MAX"/>
+ <define name="INTMAX_MIN" value="INT64_MIN"/>
+ <define name="UINTMAX_MAX" value="UINT64_MAX"/>
+ <!-- Limits of other integer types -->
+ <define name="PTRDIFF_MAX" value="INT32_MAX"/>
+ <define name="PTRDIFF_MIN" value="INT32_MIN"/>
+ <define name="SIG_ATOMIC_MAX" value="INT8_MAX"/>
+ <define name="SIG_ATOMIC_MIN" value="INT8_MIN"/>
+ <define name="SIZE_MAX" value="(__CONCAT(INT16_MAX, U))"/>
+ <!-- Macros for integer constants -->
+ <define name="INT8_C(value)" value="((int8_t) value)"/>
+ <define name="UINT8_C(value)" value="((uint8_t) __CONCAT(value, U))"/>
+ <define name="INT16_C(value)" value="value"/>
+ <define name="UINT16_C(value)" value="__CONCAT(value, U)"/>
+ <define name="INT32_C(value)" value="__CONCAT(value, L)"/>
+ <define name="UINT32_C(value)" value="__CONCAT(value, UL)"/>
+ <define name="INT64_C(value)" value="__CONCAT(value, LL)"/>
+ <define name="UINT64_C(value)" value="__CONCAT(value, ULL)"/>
+ <define name="INTMAX_C(value)" value="__CONCAT(value, LL)"/>
+ <define name="UINTMAX_C(value)" value="__CONCAT(value, ULL)"/>
+</def>
diff --git a/cppcheck/tfm-suppress-list.txt b/cppcheck/tfm-suppress-list.txt
new file mode 100644
index 0000000..368edb6
--- /dev/null
+++ b/cppcheck/tfm-suppress-list.txt
@@ -0,0 +1,55 @@
+//-------------------------------------------------------------------------------
+// Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+//
+// SPDX-License-Identifier: BSD-3-Clause
+//
+//-------------------------------------------------------------------------------
+
+//This file contains suppression rules for CppCheck.
+//C style comments can be used.
+//
+//Each line has three parts separated by ":"
+//<message id>:<file>:<line num>
+//Where:
+// <message id> is the message id to suppress. This equals the "id" field
+//   of the XML message record.
+// <file> Is a file name. cppcheck understands the following wildcards:
+// * matches any number of any characters
+// ? a single character
+// Note: please use '/' as directory separator.
+// <line num> The line number for which the message shall be ignored.
+//
+//Example to convert an XML record to a line in this file:
+// <error id="invalidPrintfArgType_sint" severity="warning" msg="%d in format string (no. 1) requires 'int' but the argument type is 'unsigned int'." verbose="%d in format string (no. 1) requires 'int' but the argument type is 'unsigned int'." cwe="686">
+// <location file0="C:\work\tf-m\test\framework\test_framework.c" file="C:\work\tf-m\test\framework\test_framework.c" line="150"/>
+// </error>
+// to
+//invalidPrintfArgType_sint:*/tf-m/test/framework/test_framework.c:150
+//
+
+//This rule conflicts with our coding style document.
+variableScope
+
+//CppCheck fails to understand macro definitions in compile_commands.json, which
+//have \ characters escaping the opening and closing ". As a result we get the
+//following false alarms.
+preprocessorErrorDirective:*/mbedtls/platform.h:29
+preprocessorErrorDirective:*/mbedtls/sha256.h:29
+
+//CppCheck ignores macros defined on the command line when using a project file
+//(e.g. compile_commands.json). As a result we cannot set compiler specific
+//macros and need to suppress the following error.
+preprocessorErrorDirective:*/cmsis_compiler.h:320
+
+//While cppcheck states to work fine with missing standard library files, it
+//still reports a lot of errors regarding those.
+//So, ignore these.
+missingIncludeSystem
+
+//Stop cppcheck reporting errors regarding suppression rules. These seem to
+//be buggy.
+unmatchedSuppression
+
+//arm_cmse.h is a special system include, stop complaining about it.
+missingInclude:*/tfm_core.h:11
+missingInclude:*/tfm_secure_api.h:11
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000..cb3200d
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,10 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2017, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+#Ignore doxygen output directories
+html/
+rtf/
+html_*.zip
diff --git a/doc/Doxyfile b/doc/Doxyfile
new file mode 100644
index 0000000..36cb0af
--- /dev/null
+++ b/doc/Doxyfile
@@ -0,0 +1,2457 @@
+# Doxyfile 1.8.12
+
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "TF-M CI Scripts"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES =
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the later case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING = sh=C no_extension=C
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 0.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 0
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous
+# namespaces are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = NO
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output is used as
+# the file version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = ../
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# If left blank the following patterns are tested: *.c, *.cc, *.cxx, *.cpp,
+# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
+# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
+# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf.
+
+FILE_PATTERNS = *.dox \
+ *.sh \
+ *.py \
+ [^.]+
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH = ./
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS = *.sh="sed -n 's/^#!\(.*\)/\1/p;tx;s/^##\(.*\)/\/\/!\1/p;tx;g;p;:x'"\
+ *.dox=cat \
+ *="sed -n 's/^#!\(.*\)/\1/p;tx;s/^##\(.*\)/\/\/!\1/p;tx;g;p;:x'"
+
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# generated with the -Duse-libclang=ON option for CMake.
+# The default value is: NO.
+
+CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+CLANG_OPTIONS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which are dependent on the configuration options used
+# (e.g. the setting GENERATE_TREEVIEW). It is highly recommended to start with
+# a default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET = image-left.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the master .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. The package can be specified just
+# by its name or with the correct syntax as to be used with the LaTeX
+# \usepackage command. To get the times font for instance you can specify :
+# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
+# To use the option intlimits with the amsmath package you can specify:
+# EXTRA_PACKAGES=[intlimits]{amsmath}
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
+# page will contain the date and time when the page was generated. Setting this
+# to NO can help when comparing the output of multiple runs.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_TIMESTAMP = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of 'which perl').
+# The default file (with absolute path) is: /usr/bin/perl.
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = NO
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see:
+# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: NO.
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command. Disabling a call graph can be
+# accomplished by means of the command \hidecallgraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command. Disabling a caller graph can be
+# accomplished by means of the command \hidecallergraph.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. For an explanation of the image formats see the section
+# output formats in the documentation of the dot tool (Graphviz (see:
+# http://www.graphviz.org/)).
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo,
+# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
+# png:gdiplus:gdiplus.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/doc/doxygen-sh.dox b/doc/doxygen-sh.dox
new file mode 100644
index 0000000..08cb2c3
--- /dev/null
+++ b/doc/doxygen-sh.dox
@@ -0,0 +1,95 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+/*! @page pgSoxygenSh Using Doxygen for documenting shell scripts
+
+To overcome Doxygen's shortcoming of not understanding shell scripts, a basic
+filter is used to convert the shell scripts to C source. The filter will keep
+lines starting with '##' or '#!' and drop all other lines.
+
+Doxygen must find the C declaration of all documented entities and thus
+fake definitions of each documented entity must be added. It is planned to
+develop a smarter filter program which will eliminate the need of fake C code.
+This task is lowest possible priority though.
+
+@section pgDoxygenShs0 Instrumenting shell scripts
+- Use <em>##</em> to add the Doxygen documentation (i.e. stuff you would add to C
+comments in C source files).
+- Use <em>#!</em> to add fake C code.
+- The documented item and the fake C counterpart must match, i.e. the
+  fingerprint of a variable definition must match the fake C declaration.
+
+Notes:
+ - You can use a "type" like <em>string</em> or <em>path</em> to make more
+ specific what a variable is expected to hold or a function is expected to
+   return. These are still just documentation hints, as the
+   "shell command language" does not support types.
+
+@section pgDoxygenShs1 Example file documentation
+To get any documentation generated for a shell script file, please include a
+<em>\@file</em> special command in it.
+
+@code{.sh}
+##
+##@file
+##@brief Shell documentation examples
+##
+##Detailed description.
+##
+@endcode
+
+This is what the generated documentation looks like: \ref examples.sh
+
+@section pgDoxygenShs2 Example variable documentation
+@code{.sh}
+##@var string example_variable
+##@brief Example variable
+##
+##This is an example to show how to document variables.
+##
+#This is needed for doxygen for now.
+#!string example_variable;
+@endcode
+
+This is what the generated documentation looks like: \ref example_variable.
+
+Notes:
+ - The variable definition at line 1 and line 7, i.e. the documented variable
+   and the fake C counterpart, must match.
+
+@section pgDoxygenShs3 Example function documentation
+@code{.sh}
+##@fn example_function(path build_base_dir, string build_config_name)
+##@brief An example function.
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@returns N/A
+##
+##This function was only made to show how to document a function.
+##
+##Usage:
+## command | result
+## --------|-------
+## example_function "test_build_st32" "somestring" | Do whatever is done.
+##
+#This is needed for doxygen for now.
+#!void example_function(path build_base_dir, string build_config_name){};
+#
+@endcode
+
+This is what the generated documentation looks like: \ref example_function.
+
+Notes:
+ - The function definition at line 1 and line 15, i.e. the documented function
+   and the fake C counterpart, must match.
+
+
+@section pgSoxygenShs2 General documentation
+All documentation which is not documenting source code (shell script)
+shall be added to *.dox files.
+
+*/
diff --git a/doc/examples.sh b/doc/examples.sh
new file mode 100755
index 0000000..2aa8d18
--- /dev/null
+++ b/doc/examples.sh
@@ -0,0 +1,39 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+##
+##@file
+##@brief Shell documentation examples
+##
+
+
+##@var string example_variable
+##@brief Example variable
+##
+##This is an example to show how to document variables.
+##
+#This is needed for doxygen for now.
+#!string example_variable;
+
+
+##@fn example_function(path build_base_dir, string build_config_name)
+##@brief An example function.
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@returns N/A
+##
+##This function was only made to show how to document a function.
+##
+##Usage:
+## command | result
+## --------|-------
+## example_function "test_build_st32" "somestring" | Do whatever is done.
+##
+#This is needed for doxygen for now.
+#!void example_function(path build_base_dir, string build_config_name){};
+#
+
diff --git a/doc/image-left.css b/doc/image-left.css
new file mode 100644
index 0000000..8b637ad
--- /dev/null
+++ b/doc/image-left.css
@@ -0,0 +1,10 @@
/* -----------------------------------------------------------------------------
# Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
------------------------------------------------------------------------------*/
/* Left-align images in the generated documentation pages. */
.image
{
    text-align: left;
}
diff --git a/doc/makefile b/doc/makefile
new file mode 100644
index 0000000..9997424
--- /dev/null
+++ b/doc/makefile
@@ -0,0 +1,36 @@
#-------------------------------------------------------------------------------
# Copyright (c) 2017-2019, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
#-------------------------------------------------------------------------------
# Build the Doxygen HTML documentation and pack it into a zip archive whose
# name is stamped with the commit date and short hash of HEAD.
#
# Fixes: the original header comment used backslash line-continuations ending
# in a stray C-style "*/" terminator line, and declared ".PHONY: all" twice.

# 7-Zip binary name differs between Unix and Windows hosts.
COMPR=7z

ifeq ($(OS),Windows_NT)
COMPR=7z.exe
endif

COMPR_FLAGS=a -tzip

# Commit date + short hash of HEAD, used to stamp the archive name.
git_hash:=$(shell git log --format="%cd_%h" --date=short -n 1)

archive_name=html_${git_hash}.zip

.PHONY: all
all: html zip

html:
	doxygen Doxyfile

zip: ${archive_name}

${archive_name}: html
	"${COMPR}" ${COMPR_FLAGS} $@ $?

.PHONY: clean
clean:
	-rm html_*.zip
	-rm -rf html
diff --git a/doc/readme.dox b/doc/readme.dox
new file mode 100644
index 0000000..61dbf0c
--- /dev/null
+++ b/doc/readme.dox
@@ -0,0 +1,74 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+/*! @mainpage TF-M CI Script Documentation
+
+@section Introduction
+This repository holds build scripts used by "Continuous Integration" jobs
+executed on Linaro Jenkins infrastructure.
+
+These scripts leave "provisioning" of the environment to the executor, and thus
+will not install compilers and other needed tools, clone repositories and so on.
+
+The intention is to make these tools execute fine not only on the Jenkins
+servers, but also on developer PCs, and thus these scripts provide both a
+"centralised" and a "distributed" CI environment.
+
+@section documenting Writing/maintaining this documentation
+This documentation is Doxygen based. Please use the makefile in this directory
+to generate html output.
+
+The Doxygen configuration is made in a way to process all files with a .sh or
+.dox extension. Still output will only be generated for documented files, i.e.
+files with a <em>\@file</em> tag.
+
+For details on how to instrument the shell scripts please refer to
+\ref pgSoxygenSh.
+
+@tableofcontents
+@section ReadmeS1 Building tf-m.
+
+Python build documents are preliminary and will be included in future revisions.
+
+Each Python script contains built-in help which can be accessed by -h, --help
+command line argument.
+
+In order to build all configurations use build_helper.py
+
+@code{.sh}
+# Will build all configurations to build-all-dir and
+# create a report in json format.
+
+./ci-scripts/build_helper/build_helper.py -b build-all-dir -r report.json
+@endcode
+
+
+@section ReadmeS2 Static analysis of TF-M.
+The CI system currently supports checkpatch and cppcheck to analyze TF-M source
+content.
+
+@subsection ReadmeS2S1 Cppcheck
+The script \ref run-cppcheck.sh can be used to execute cppcheck. For details
+please refer to the documentation of the script.
+
+The script \ref make_cppcheck_summary.sh can be used to generate a summary of
+XML output files generated by cppcheck.
+
+@subsection ReadmeS2S2 Checkpatch
+The script \ref run-checkpatch.sh can be used to execute checkpatch. For details
+please refer to the documentation of the script.
+
+The script @ref make_checkpatch_summary.sh can be used to generate a summary
+report file of checkpatch results.
+
+@section pgindex_r1 Table of contents
+- @ref run-checkpatch.sh
+- @ref make_checkpatch_summary.sh
+- @ref run-cppcheck.sh
+- @ref make_cppcheck_summary.sh
+- @ref util_cmake.sh
+*/
diff --git a/lava_helper/jinja2_templates/template_tfm_mps2_sse_200.jinja2 b/lava_helper/jinja2_templates/template_tfm_mps2_sse_200.jinja2
new file mode 100644
index 0000000..858e71f
--- /dev/null
+++ b/lava_helper/jinja2_templates/template_tfm_mps2_sse_200.jinja2
@@ -0,0 +1,73 @@
+{#------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# LAVA job definition template: expands one deploy/boot/test action sequence
+# per (platform, compiler, build_type, boot_type, test) combination.
+#-----------------------------------------------------------------------------#}
+device_type: {{ device_type }}
+job_name: {{ job_name }}
+
+timeouts:
+ job:
+ minutes: {{ job_timeout }}
+ action:
+ minutes: {{ action_timeout }}
+ connections:
+ lava-test-monitor:
+ minutes: {{ monitor_timeout }}
+
+priority: medium
+visibility: public
+
+actions:
+{%- for platform, recovery in platforms.items() -%}
+ {%- for compiler in compilers %}
+ {%- for build_type in build_types %}
+ {%- for boot_type in boot_types %}
+ {%- for name, test in tests.items() -%}
+ - deploy:
+ to: mps
+ images:
+ recovery_image:
+ url: {{ recovery_store_url }}/lastSuccessfulBuild/artifact/{{ recovery }}
+ compression: gz
+ namespace: target
+
+ - deploy:
+ to: mps
+ images:
+ test_binary:
+ url: {{artifact_store_url}}/{{ build_no}}/artifact/build-ci-all/{{ platform }}_{{ compiler }}_Config{{ name }}_{{ build_type }}_{{ boot_type }}/{{ test.binaries.firmware }}
+ namespace: target
+
+ - deploy:
+ to: mps
+ images:
+ test_binary:
+ url: {{artifact_store_url}}/{{ build_no}}/artifact/build-ci-all/{{ platform }}_{{ compiler }}_Config{{ name }}_{{ build_type }}_{{ boot_type }}/{{ test.binaries.bootloader }}
+ namespace: target
+
+ - boot:
+ method: minimal
+ timeout:
+ minutes: 10
+ namespace: target
+
+ - test:
+ namespace: target
+ delay: 5
+ monitors:
+ {%- for monitor in test.monitors %}
+ - name: "{{monitor.name}}_{{ platform }}_{{ compiler }}_{{ name }}_{{ build_type }}_{{ boot_type }}"
+ start: "{{monitor.start}}"
+ end: "{{monitor.end}}"
+ pattern: "{{monitor.pattern}}"
+ fixupdict:
+ '{{monitor.fixup.pass}}': pass
+ '{{monitor.fixup.fail}}': fail
+ {%- endfor %}
+ {%- endfor %}
+ {%- endfor %}
+ {%- endfor %}
+ {%- endfor %}
+{%- endfor %}
diff --git a/lava_helper/lava_helper.py b/lava_helper/lava_helper.py
new file mode 100755
index 0000000..4e8ed88
--- /dev/null
+++ b/lava_helper/lava_helper.py
@@ -0,0 +1,445 @@
+#!/usr/bin/env python3 -u
+
+""" lava_helper.py:
+
+ Generate custom defined LAVA definitions redered from Jinja2 templates.
+ It can also parse the yaml output of LAVA and verify the test outcome """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import os
+import sys
+import argparse
+from copy import deepcopy
+from collections import OrderedDict
+from jinja2 import Environment, FileSystemLoader
+from lava_helper_configs import *
+
+try:
+ from tfm_ci_pylib.utils import save_json, load_json, sort_dict,\
+ load_yaml, test
+ from tfm_ci_pylib.lava_rpc_connector import LAVA_RPC_connector
+except ImportError:
+ dir_path = os.path.dirname(os.path.realpath(__file__))
+ sys.path.append(os.path.join(dir_path, "../"))
+ from tfm_ci_pylib.utils import save_json, load_json, sort_dict,\
+ load_yaml, test
+ from tfm_ci_pylib.lava_rpc_connector import LAVA_RPC_connector
+
+
def sort_lavagen_config(cfg):
    """ Return an OrderedDict copy of cfg with a deterministic key order.

    When cfg has exactly the keys of a full top-level lavagen config, the
    canonical ``lavagen_config_sort_order`` is honoured; otherwise keys are
    ordered by length as a stable fallback. Nested dictionaries are sorted
    recursively and lists of dictionaries are normalized with ``sort_dict``
    using ``lava_gen_monitor_sort_order``.
    """

    res = OrderedDict()
    if sorted(lavagen_config_sort_order) == sorted(cfg.keys()):
        # Full top-level config: honour the canonical field order.
        item_list = sorted(cfg.keys(),
                           key=lambda x: lavagen_config_sort_order.index(x))
    else:
        # Nested/partial dictionary: fall back to a length-based sort.
        item_list = sorted(cfg.keys(), key=len)
    for k in item_list:
        v = cfg[k]
        if isinstance(v, dict):
            res[k] = sort_lavagen_config(v)
        # Guard on non-empty list: the original indexed v[0] unconditionally
        # and raised IndexError for an empty list value.
        elif isinstance(v, list) and v and isinstance(v[0], dict):
            res[k] = [sort_dict(e, lava_gen_monitor_sort_order) for e in v]
        else:
            res[k] = v
    return res
+
+
def save_config(cfg_f, cfg_obj):
    """ Serialize a configuration object to a JSON file in stable key order """
    stable_cfg = sort_lavagen_config(cfg_obj)
    save_json(cfg_f, stable_cfg)
+
+
def print_configs():
    """ Print the names of all built-in configurations """

    banner_pad = "*" * 10
    print("%(pad)s Built-in configurations: %(pad)s" % {"pad": banner_pad})
    for cfg_name in lava_gen_config_map.keys():
        print("\t * %s" % cfg_name)
+
+
def generate_test_definitions(config, work_dir):
    """ Get a dictionary configuration, and an existing jinja2 template
    and generate a LAVA compatible yaml definition.

    :param config: Lavagen configuration dictionary; must contain a
        "templ" key naming the jinja2 template file.
    :param work_dir: Directory searched for the jinja2 template.
    :return: The rendered job definition as a string.
    """

    template_loader = FileSystemLoader(searchpath=work_dir)
    template_env = Environment(loader=template_loader)

    # Ensure that the jinja2 template is always rendered the same way
    config = sort_lavagen_config(config)

    # pop() here mutates only the sorted copy returned above, so the
    # caller's dictionary keeps its "templ" entry.
    template_file = config.pop("templ")

    definition = template_env.get_template(template_file).render(**config)
    return definition
+
+
def generate_lava_job_defs(user_args, config):
    """ Render a LAVA test job definition and write it to disk """

    # Resolve the working directory; fall back to this script's location.
    if user_args.work_dir:
        work_dir = os.path.abspath(user_args.work_dir)
    else:
        work_dir = os.path.abspath(os.path.dirname(__file__))

    # When the user pins a known platform, restrict the config to only it.
    requested = user_args.platform
    if requested and requested in config["platforms"]:
        config["platforms"] = {requested: config["platforms"][requested]}

    # Render the definition and store it at the requested output path.
    definition = generate_test_definitions(config, work_dir)
    out_file = os.path.abspath(user_args.lava_def_output)
    with open(out_file, "w") as F:
        F.write(definition)

    print("Definition created at %s" % out_file)
+
+
+def test_map_from_config(lvg_cfg=tfm_mps2_sse_200):
+ """ Extract all required information from a lavagen config map
+ and generate a map of required tests, indexed by test name """
+
+ test_map = {}
+ suffix_l = []
+ for p in lvg_cfg["platforms"]:
+ for c in lvg_cfg["compilers"]:
+ for bd in lvg_cfg["build_types"]:
+ for bt in lvg_cfg["boot_types"]:
+ suffix_l.append("%s_%s_%s_%s_%s" % (p, c, "%s", bd, bt))
+
+ for test_cfg_name, tst in lvg_cfg["tests"].items():
+ for monitor in tst["monitors"]:
+ for suffix in suffix_l:
+ key = (monitor["name"] + "_" + suffix % test_cfg_name).lower()
+ # print (monitor['required'])
+ test_map[key] = monitor['required']
+
+ return deepcopy(test_map)
+
+
def test_lava_results(user_args, config):
    """ Uses input of a test config dictionary and a LAVA summary file
    and determines whether the test run was successful.

    Exits the process with status 1 on failure, otherwise falls through
    after printing the PASS message.
    """

    # Load the raw LAVA yaml results file provided on the command line.
    result_raw = load_yaml(user_args.lava_results)

    # Expected tests, keyed by lowercase monitor/test name.
    test_map = test_map_from_config(config)
    t_dict = {k: {} for k in test_map}

    # Return true if test is contained in test_groups
    def test_filter(x):
        return x["metadata"]['definition'] in test_map

    # Create a dictionary with common keys as the test map and test results
    # {test_suite: {test_name: pass/fail}}
    def format_results(x):
        t_dict[x["metadata"]["definition"]].update({x["metadata"]["case"]:
                                                    x["metadata"]["result"]})

    # Remove all irrelevant entries from data
    test_results = list(filter(test_filter, result_raw))

    # Call the formatter (map is consumed only for its side effects)
    list(map(format_results, test_results))

    # We need to check that each of the tests contained in the test_map exist
    # AND that they have a passed status
    t_sum = 0
    for k, v in t_dict.items():
        try:
            t_sum += int(test(test_map[k],
                              v,
                              pass_text=["pass"],
                              error_on_failed=False,
                              test_name=k,
                              summary=user_args.lava_summary)["success"])
        # Status can be None if a test didn't fully run/complete
        # NOTE(review): assigning t_sum = 1 (rather than skipping the add)
        # discards earlier accumulated passes and forces the group check
        # below to fail whenever len(t_dict) > 1 -- confirm intent.
        except TypeError as E:
            t_sum = 1

    # Every single one of the tests needs to have passed for the group to
    # succeed
    if t_sum != len(t_dict):
        print("Group Testing FAILED!")
        sys.exit(1)
    print("Group Testing PASS!")
+
+
def test_lava_dispatch_credentials(user_args):
    """ Validate the provided LAVA token/credentials.

    Returns a connected LAVA_RPC_connector on success; terminates the
    program with exit code 1 if credentials are missing or rejected.
    """

    try:
        # Collect the authentication tokens. Initialize explicitly: the
        # original left usr/secret unbound when neither source was set and
        # relied on the resulting NameError being caught below.
        usr = secret = None
        if user_args.token_from_env:
            usr = os.environ['LAVA_USER']
            secret = os.environ['LAVA_TOKEN']
        elif user_args.token_usr and user_args.token_secret:
            usr = user_args.token_usr
            secret = user_args.token_secret

        # Do not submit a job without complete credentials
        if not usr or not secret:
            raise Exception("Credentials not set")

        lava = LAVA_RPC_connector(usr,
                                  secret,
                                  user_args.lava_url,
                                  user_args.lava_rpc)

        # Test the credentials against the backend
        if not lava.test_credentials():
            raise Exception("Server rejected user authentication")
    except Exception as e:
        print("Credential validation failed with : %s" % e)
        print("Did you set --lava_token_usr, --lava_token_secret?")
        sys.exit(1)
    return lava
+
+
def lava_dispatch(user_args):
    """ Submit a job to the LAVA backend, block until it is completed, and
    fetch the results files if successful. If not, calls sys.exit with a
    return code of 1 """

    lava = test_lava_dispatch_credentials(user_args)
    job_id, job_url = lava.submit_job(user_args.dispatch)
    print("Job submitted at: " + job_url)

    # Persist the job id so external tooling can track or cancel the job.
    with open("lava_job.id", "w") as id_file:
        id_file.write(str(job_id))
    print("Job id %s stored at lava_job.id file." % job_id)

    # Block until the job finishes or the timeout expires.
    timeout_s = int(user_args.dispatch_timeout)
    status = lava.block_wait_for_job(job_id, timeout_s)
    print("Job %s returned with status: %s" % (job_id, status))

    if status != "Complete":
        sys.exit(1)

    lava.get_job_results(job_id, user_args.lava_job_results)
    print("Job results exported at: %s" % user_args.lava_job_results)
    sys.exit(0)
+
+
def dispatch_cancel(user_args):
    """ Send a cancel request for the user provided job id
    (user_args.dispatch_cancel) and report the backend's response """

    lava = test_lava_dispatch_credentials(user_args)
    # Renamed from 'id' to avoid shadowing the builtin; also fixed the
    # "cancell" typo in the user-facing message.
    job_id = user_args.dispatch_cancel
    result = lava.cancel_job(job_id)
    print("Request to cancel job: %s returned with status %s" % (job_id,
                                                                 result))
+
+
def load_config_overrides(user_args):
    """ Load a configuration from an external file or the built-in map and
    apply the user provided overrides on top of it """

    cfg_file = user_args.config_file
    if cfg_file:
        # An external JSON file takes precedence over built-in configs.
        print("Loading config from file %s" % cfg_file)
        try:
            config = load_json(cfg_file)
        except Exception:
            print("Failed to load config from: %s ." % cfg_file)
            sys.exit(1)
    else:
        print("Using built-in config: %s" % user_args.config_key)
        try:
            config = lava_gen_config_map[user_args.config_key]
        except KeyError:
            print("No template found for config: %s" % user_args.config_key)
            sys.exit(1)

    # Apply command line overrides.
    config["build_no"] = user_args.build_no
    config["templ"] = os.path.join(user_args.template_dir, config["templ"])
    return config
+
+
def main(user_args):
    """ Main logic, forked according to task arguments.

    Exactly one task is performed per invocation: configuration listing or
    export, LAVA result parsing, job dispatch/cancel, or job definition
    generation.
    """

    # If a configuration listing is requested
    if user_args.ls_config:
        print_configs()
        return
    elif user_args.cconfig:
        config_key = user_args.cconfig
        if config_key in lava_gen_config_map.keys():
            config_file = "lava_job_gen_cfg_%s.json" % config_key
            save_config(config_file, lava_gen_config_map[config_key])
            print("Configuration exported at %s" % config_file)
        # NOTE(review): an unknown config name returns here without any
        # message -- confirm whether an error report is desired.
        return
    else:
        config = load_config_overrides(user_args)

    # Configuration is assumed fixed at this point
    if user_args.lava_results:
        print("Evaluating File", user_args.lava_results)
        test_lava_results(user_args, config)
    elif user_args.dispatch:
        lava_dispatch(user_args)
    elif user_args.dispatch_cancel:
        dispatch_cancel(user_args)
    elif user_args.create_definition:
        print("Generating Lava")
        generate_lava_job_defs(user_args, config)
    else:
        print("Nothing to do, please select a task")
+
+
def get_cmd_args():
    """ Parse command line arguments.

    Returns the populated argparse namespace. Option groups exist for
    --help readability only; argparse does not enforce their relationships.
    """

    # Parse command line arguments to override config
    parser = argparse.ArgumentParser(description="Lava Helper")

    def_g = parser.add_argument_group('Create LAVA Definition')
    disp_g = parser.add_argument_group('Dispatch LAVA job')
    parse_g = parser.add_argument_group('Parse LAVA results')
    config_g = parser.add_argument_group('Configuration handling')
    over_g = parser.add_argument_group('Overrides')

    # Configuration control
    config_g.add_argument("-cn", "--config-name",
                          dest="config_key",
                          action="store",
                          default="tfm_mps2_sse_200",
                          help="Select built-in configuration by name")
    config_g.add_argument("-cf", "--config-file",
                          dest="config_file",
                          action="store",
                          help="Load config from external file in JSON format")
    config_g.add_argument("-te", "--task-config-export",
                          dest="cconfig",
                          action="store",
                          help="Export a json file with the current config "
                               "parameters")
    config_g.add_argument("-tl", "--task-config-list",
                          dest="ls_config",
                          action="store_true",
                          default=False,
                          help="List built-in configurations")

    # Definition creation commands
    def_g.add_argument("-tc", "--task-create-definition",
                       dest="create_definition",
                       action="store_true",
                       default=False,
                       help="Used in conjunction with --config parameters. "
                            "A LAVA compatible job definition will be created")
    def_g.add_argument("-cb", "--create-definition-build-no",
                       dest="build_no",
                       action="store",
                       default="lastSuccessfulBuild",
                       help="JENKINGS Build number selector. "
                            "Default: lastSuccessfulBuild")
    def_g.add_argument("-co", "--create-definition-output-file",
                       dest="lava_def_output",
                       action="store",
                       default="job_results.yaml",
                       help="Set LAVA compatible .yaml output file")

    # Parameter override commands
    over_g.add_argument("-ow", "--override-work-path",
                        dest="work_dir",
                        action="store",
                        help="Working Directory (absolute path)")
    over_g.add_argument("-ot", "--override-template-dir",
                        dest="template_dir",
                        action="store",
                        default="jinja2_templates",
                        help="Set directory where Jinja2 templates are stored")
    over_g.add_argument("-op", "--override-platform",
                        dest="platform",
                        action="store",
                        help="Override platform.Only the provided one "
                             "will be tested ")
    # Result parsing commands
    parse_g.add_argument("-tp", "--task-lava-parse",
                         dest="lava_results",
                         action="store",
                         help="Parse provided yaml file, using a configuration"
                              " as reference to determine the outpcome"
                              " of testing")
    # NOTE(review): default=True combined with action="store_true" makes
    # this flag a no-op (the value is always True); presumably the default
    # should be False -- confirm against CI usage before changing.
    parse_g.add_argument("-ls", "--lava-parse-summary",
                         dest="lava_summary",
                         default=True,
                         action="store_true",
                         help="Print full test summary")

    # Lava job control commands
    disp_g.add_argument("-td", "--task-dispatch",
                        dest="dispatch",
                        action="store",
                        help="Submit yaml file defined job to backend, and "
                             "wait for it to complete. \nRequires:"
                             " --lava_url --lava_token_usr/pass/--"
                             "lava_token_from_environ arguments, with optional"
                             "\n--lava_rpc_prefix\n--lava-job-results\n"
                             "parameters. \nIf not set they get RPC2 and "
                             "lava_job_results.yaml default values.\n"
                             "The number job id will be stored at lava_job.id")
    disp_g.add_argument("-dc", "--dispatch-cancel",
                        dest="dispatch_cancel",
                        action="store",
                        help="Send a cancell request for job with provided id")
    disp_g.add_argument("-dt", "--dispatch-timeout",
                        dest="dispatch_timeout",
                        default="3600",
                        action="store",
                        help="Maximum Time to block for job"
                             " submission to complete")
    disp_g.add_argument("-dl", "--dispatch-lava-url",
                        dest="lava_url",
                        action="store",
                        help="Sets the lava hostname during job dispatch")
    disp_g.add_argument("-dr", "--dispatch-lava-rpc-prefix",
                        dest="lava_rpc",
                        action="store",
                        default="RPC2",
                        help="Application prefix on Backend"
                             "(i.e www.domain.com/APP)\n"
                             "By default set to RPC2")
    disp_g.add_argument("-du", "--dispatch-lava_token_usr",
                        dest="token_usr",
                        action="store",
                        help="Lava user submitting the job")
    disp_g.add_argument("-ds", "--dispatch-lava_token_secret",
                        dest="token_secret",
                        action="store",
                        help="Hash token used to authenticate"
                             "user during job submission")
    disp_g.add_argument("-de", "--dispatch-lava_token_from_environ",
                        dest="token_from_env",
                        action="store_true",
                        help="If set dispatcher will use the enviroment"
                             "stored $LAVA_USER, $LAVA_TOKEN for credentials")
    disp_g.add_argument("-df", "--dispatch-lava-job-results-file",
                        dest="lava_job_results",
                        action="store",
                        default="lava_job_results.yaml",
                        help="Name of the job results file after job is "
                             "complete. Default: lava_job_results.yaml")
    return parser.parse_args()
+
+
+if __name__ == "__main__":
+ main(get_cmd_args())
diff --git a/lava_helper/lava_helper_configs.py b/lava_helper/lava_helper_configs.py
new file mode 100644
index 0000000..1ac1479
--- /dev/null
+++ b/lava_helper/lava_helper_configs.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python3
+
+""" lava_job_generator_configs.py:
+
+ Default configurations for lava job generator """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+
def lava_gen_get_config_subset(config,
                               default=True,
                               core=True,
                               regression=True):
    """ Allow dynamic generation of configuration combinations by
    subtracting undesired test sets from a deep copy of the full config.

    :param config: Full lavagen configuration dictionary (not modified).
    :param default: Keep the "Default" test set when True.
    :param core: Keep the "CoreTest" test set when True.
    :param regression: Keep the "Regression" test set when True.
    :return: A deep copy of config with only the requested test sets.
    """

    from copy import deepcopy

    # A single deep copy is enough: it already clones the nested "tests"
    # map (the original deep-copied config["tests"] a second time).
    cfg = deepcopy(config)
    tests = cfg["tests"]

    # Remove all test sets not requested by the caller
    if not default:
        tests.pop("Default")
    if not core:
        tests.pop("CoreTest")
    if not regression:
        tests.pop("Regression")

    return cfg
+
+
# Built-in LAVA job-generation configuration for TF-M on MPS2+ SSE-200 (AN521).
tfm_mps2_sse_200 = {
    "templ": "template_tfm_mps2_sse_200.jinja2",
    "job_name": "mps2plus-arm-tfm",
    "device_type": "mps",
    # Timeouts are consumed as minutes by the jinja2 template.
    "job_timeout": 60,
    "action_timeout": 60,
    "monitor_timeout": 60,
    "recovery_store_url": "https://ci.trustedfirmware.org/"
                          "job/tf-m-build-test-review",
    "artifact_store_url": "https://ci.trustedfirmware.org/"
                          "job/tf-m-build-test-review",
    # NOTE(review): the recovery archive is named "an512" while the platform
    # key is AN521 -- confirm the artifact name against the build job output.
    "platforms": {"AN521": "mps2_sse200_an512.tar.gz"},
    "compilers": ["GNUARM"],
    "build_types": ["Debug"],
    "boot_types": ["BL2"],
    "tests": {
        'Default': {
            "binaries": {
                "firmware":
                "install/outputs/AN521/tfm_sign.bin",
                "bootloader":
                "install/outputs/AN521/mcuboot.bin"
            },
            "monitors": [
                {
                    'name': 'Secure_Test_Suites_Summary',
                    'start': 'Jumping to the first image slot',
                    'end': '\\x1b\\\[0m',
                    'pattern': r'\x1b\\[1;34m\\[Sec Thread\\] '
                               r'(?P<test_case_id>Secure image '
                               r'initializing)(?P<result>!)',
                    'fixup': {"pass": "!", "fail": ""},
                    'required': ["secure_image_initializing"]
                }  # Monitors
            ]
        },  # Default
        'CoreTest': {
            "recovery": "mps2_sse200_an512.tar.gz",
            "binaries": {
                "firmware": "install/outputs/AN521/tfm_sign.bin",
                "bootloader": "install/outputs/AN521/mcuboot.bin"
            },
            "monitors": [
                {
                    'name': 'Non_Secure_Test_Suites_Summary',
                    'start': 'TFM level is: 3',
                    'end': 'End of Non-secure test suites',
                    'pattern': r"[\x1b]\\[37mTest suite '"
                               r"(?P<test_case_id>.*)' has [\x1b]\\[32m"
                               r" (?P<result>PASSED|FAILED)",
                    'fixup': {"pass": "PASSED", "fail": "FAILED"},
                    'required': [
                        "core_non_secure_positive_tests_tfm_core_test_1xxx_"]
                }  # Monitors
            ]
        },  # CoreTest
        'Regression': {
            "recovery": "mps2_sse200_an512.tar.gz",
            "binaries": {
                "firmware": "install/outputs/AN521/tfm_sign.bin",
                "bootloader": "install/outputs/AN521/mcuboot.bin"
            },
            "monitors": [
                {
                    'name': 'Secure_Test_Suites_Summary',
                    'start': 'Secure test suites summary',
                    'end': 'End of Secure test suites',
                    'pattern': r"[\x1b]\\[37mTest suite '(?P<"
                               r"test_case_id>.*)' has [\x1b]\\[32m "
                               r"(?P<result>PASSED|FAILED)",
                    'fixup': {"pass": "PASSED", "fail": "FAILED"},
                    'required': [
                        "invert_secure_interface_tests_tfm_invert_test_1xxx_",
                        "sst_reliability_tests_tfm_sst_test_3xxx_",
                        "sst_secure_interface_tests_tfm_sst_test_2xxx_"
                    ]
                },
                {
                    'name': 'Non_Secure_Test_Suites_Summary',
                    'start': 'Non-secure test suites summary',
                    'end': r'End of Non-secure test suites',
                    'pattern': r"[\x1b]\\[37mTest suite '(?P"
                               r"<test_case_id>.*)' has [\x1b]\\[32m "
                               r"(?P<result>PASSED|FAILED)",
                    'fixup': {"pass": "PASSED", "fail": "FAILED"},
                    'required': [
                        "core_non_secure_positive_tests_tfm_core_test_1xxx_",
                        ("invert_non_secure_interface_tests_"
                         "tfm_invert_test_1xxx_"),
                        "sst_policy_tests_tfm_sst_test_4xxx_",
                        "sst_non_secure_interface_tests_tfm_sst_test_1xxx_",
                        "sst_referenced_access_tests_tfm_sst_test_5xxx_"]
                }
            ]  # Monitors
        },  # Regression
    }  # Tests
}
+
# All configurations should be mapped here
lava_gen_config_map = {"tfm_mps2_sse_200": tfm_mps2_sse_200}

# Canonical key order used by sort_lavagen_config() for a full top-level
# configuration dictionary.
lavagen_config_sort_order = [
    "templ",
    "job_name",
    "device_type",
    "job_timeout",
    "action_timeout",
    "monitor_timeout",
    "recovery_store_url",
    "artifact_store_url",
    "platforms",
    "compilers",
    "build_types",
    "boot_types",
    "tests"
]

# Canonical key order for the monitor dictionaries inside "tests".
lava_gen_monitor_sort_order = [
    'name',
    'start',
    'end',
    'pattern',
    'fixup',
]

if __name__ == "__main__":
    # Stand-alone invocation: export the built-in configuration map.
    import os
    import sys
    from lava_helper import sort_lavagen_config
    try:
        from tfm_ci_pylib.utils import export_config_map
    except ImportError:
        # Allow execution from outside the repository checkout as well.
        dir_path = os.path.dirname(os.path.realpath(__file__))
        sys.path.append(os.path.join(dir_path, "../"))
        from tfm_ci_pylib.utils import export_config_map

    # "--export" alone dumps the raw map; with an extra argument the map is
    # sorted first and written to the given destination.
    if len(sys.argv) == 2:
        if sys.argv[1] == "--export":
            export_config_map(lava_gen_config_map)
    if len(sys.argv) == 3:
        if sys.argv[1] == "--export":
            export_config_map(sort_lavagen_config(lava_gen_config_map),
                              sys.argv[2])
diff --git a/make_checkpatch_summary.sh b/make_checkpatch_summary.sh
new file mode 100755
index 0000000..8e068af
--- /dev/null
+++ b/make_checkpatch_summary.sh
@@ -0,0 +1,46 @@
#!/bin/bash
#-------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
#-------------------------------------------------------------------------------

##
##@file
##@brief This script is to make a summary of run-checkpatch.sh generated output
##files.
##
##The generated summary will hold the number of error and warning messages for
##each file.
##
##The first parameter of the script must be the location of input file.
##

#Check parameter
if [ -z ${1+x} ]
then
    echo "Checkpatch output file not specified!"
    exit 1
fi

infile="$1"

#Find the summary line for each file. Cut out the summary line plus the file
#name from the previous line.
#Concatenate the current line to the previous one,
#Print the two lines that match the following regexp:
# remember anything: any number of non : characters (this is the file path)
# followed by a :
# match any number of following characters till "total:" is found
# remember all characters after "total:" (this is the summary)
# replace the matched string with the first and the second match concatenated
# with a new line and a tab character in between.
# we use s: single line and m: multi line modifiers for the regexp match
res=$(perl -ne '$l=$l.$_; print "$l" if $l=~s/.*?([^:]+):.*\ntotal:(.*)/$1:\n\t$2/sm;$l=$_;' "$infile")

#Print the result to standard output.
cat <<EOM
Checkpatch result summary:
$res
EOM
diff --git a/make_cppcheck_summary.sh b/make_cppcheck_summary.sh
new file mode 100755
index 0000000..aff1909
--- /dev/null
+++ b/make_cppcheck_summary.sh
@@ -0,0 +1,57 @@
#!/bin/bash
#-------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
#-------------------------------------------------------------------------------

#Fail if any executed command fails.
set -e

##
##@file
##@brief This script is to make a summary of cppcheck XML output files.
##
##The generated summary will hold the number of messages of each severity type.
##
##The first parameter of the script must be the location of the XML file.
##
##The script uses regular expressions to identify and count messages.
##
##Usage:
##  command | result
##  --------|-------
##  make_cppcheck_summary.sh foo/bar/build.xml | Summary text.
##

#Check parameter
if [ -z ${1+x} ]
then
    echo "Cppcheck output file not specified!"
    exit 1
fi

xml_file="$1"

#List of message severity types cppcheck reports.
severity_list=( "none" "error" "warning" "style" "performance" "portability"
                "information" "debug")

#Count each severity type and build result message.
for severity in "${severity_list[@]}"
do
    #Count lines with this severity type. grep exits non-zero on no match;
    #"|| true" keeps "set -e" from aborting the script in that case.
    n=$(grep -c "severity=\"$severity\"" "$xml_file" || true)
    #Start of report line
    line=$'\n\tIssues with severity '"\"$severity\":"
    #Indentation to character position 46.
    indent=$(eval "printf ' %.0s' {1..$(( 46-${#line} ))}")
    #Add indentation and number
    line="$line$indent$n"
    #Extend issue list
    issue_list="$issue_list$line"
done
msg="Cppcheck results: $issue_list"

echo "$msg"
diff --git a/run-checkpatch.sh b/run-checkpatch.sh
new file mode 100755
index 0000000..60c3be9
--- /dev/null
+++ b/run-checkpatch.sh
@@ -0,0 +1,293 @@
+#!/bin/bash
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+##
+##@file
+##@brief Execute checkpatch
+##
+##This bash script can be used to execute checkpatch for the tf-m project.
+##The script can be started with -h to give help on usage.
+##
+
+
+##@var SKIP_PATHS
+##@brief Folders and files to not be analysed by checkpatch
+##
+##This variable specifies the list of directories which shall not be analysed
+##by checkpatch.
+##This is a colon (:) separated list.
+##
+#This is needed for Doxygen for now.
+#!string SKIP_PATHS;
+#The \* escapes survive until the list is expanded into "find -path" exclude
+#arguments in check_tree()/check_diff().
+SKIP_PATHS='./build-\*:./test/\*:./platform/\*:*/tz_\*'
+
+##@var TFM_DIRECTORY_NAME
+##@brief Default path to tf-m source code.
+##
+#This is needed for Doxygen for now.
+#!path TFM_DIRECTORY_NAME;
+TFM_DIRECTORY_NAME="./"
+
+##@var OUTPUT_FILE_PATH
+##@brief Default path to report file.
+##
+##This text file will hold the output report of checkpatch.
+##
+#This is needed for Doxygen for now.
+#!path OUTPUT_FILE_PATH;
+OUTPUT_FILE_PATH="tfm_checkpatch_report.txt"
+
+##@var CHECKPATCH_PATH_DEF
+##@brief Default Path to checkpatch executable.
+##
+#This is needed for Doxygen for now.
+#!path CHECKPATCH_PATH_DEF;
+CHECKPATCH_PATH_DEF=$(readlink -f $(dirname "$0")"/checkpatch")
+
+##@var CHECKPATCH_PATH
+##@brief Path to checkpatch executable.
+##
+## Checkpatch path can be overridden by user argument. Initialized with Default
+## value of ./checkpatch
+##
+#This is needed for Doxygen for now.
+#!path CHECKPATCH_PATH;
+CHECKPATCH_PATH=$CHECKPATCH_PATH_DEF
+
+##@fn usage(void)
+##@brief Print help text on usage.
+##@returns n/a
+##
+#This is needed for Doxygen for now.
+#!void usage(void){};
+usage() {
+    ## Print help text on usage to standard output.
+    # Fixed: the synopsis previously omitted -l and described -p as taking
+    # a <number> although it takes a path; "checkpath" typo corrected.
+    echo "Usage: $(basename -- "$0") [-v] [-h] [-d <TF-M dir>] [-f <output_filename>] [-u] [-l <number>] [-p <path>]"
+    echo " -v, Verbose output"
+    echo " -h, Script help"
+    echo " -d, <TF-M dir>, TF-M directory"
+    echo " -f, <output_filename>, Output filename"
+    echo " -u, Update checkpatch files using curl"
+    echo " -l <number>, Check only the last <number> commits (HEAD~<number>)."
+    echo " -p <path>, Provide location of directory containing checkpatch."
+    echo -e "\nNOTE: make sure checkpatch is located in '$CHECKPATCH_PATH'"
+}
+
+##@fn app_err(void)
+##@brief Print error massage.
+##@returns n/a
+##
+#This is needed for Doxygen for now.
+#!void app_err(void){};
+app_err() {
+    ## Print error message ($1) to standard error.
+    # Fixed: $1 is now inside the quotes so messages containing whitespace
+    # runs or glob characters are printed verbatim instead of being
+    # word-split and pathname-expanded.
+    echo "Error: $1" >&2
+}
+
+##@fn download_checkpatch_file(string f_name)
+##@brief Download the specified checkpatch file.
+##@param[in] f_name name of file to download.
+##@returns status code
+##
+##Download checkpatch files from raw.githubusercontent.com to the current
+##directory. Target files are truncated to avoid breaking links.
+##
+#This is needed for Doxygen for now.
+#!err download_checkpatch_file(string f_name){};
+download_checkpatch_file() {
+    ## Download checkpatch file $1 from the upstream Linux kernel scripts
+    ## directory into the current directory. Target files are overwritten
+    ## through cat to avoid breaking (sym)links. Exits the script on failure.
+    ##
+    ## Fixed: the REDIRECT variable was assigned but never used, so -v had
+    ## no effect here; curl output was always discarded. Also added --fail
+    ## so an HTTP error (e.g. 404) makes curl return non-zero instead of
+    ## saving the error page as the checkpatch file.
+    if [ $VERBOSE -eq 1 ]; then
+        curl --fail "https://raw.githubusercontent.com/torvalds/linux/master/scripts/$1" --output "$1.new"
+    else
+        curl --fail --silent "https://raw.githubusercontent.com/torvalds/linux/master/scripts/$1" --output "$1.new"
+    fi
+
+    if [ $? != 0 ]; then
+        app_err "curl reported error while downloading $1"
+        exit 1
+    else
+        #Copy file and preserve links.
+        cat "$1.new" > "$1"
+        rm "$1.new"
+    fi
+}
+
+##@fn update_checkpatch()
+##@brief Download checkpatch files.
+##@returns status code
+##
+##Download checkpatch files from raw.githubusercontent.com to \ref CHECKPATCH_PATH
+##directory.
+##
+#This is needed for Doxygen for now.
+#!void update_checkpatch(){};
+update_checkpatch() {
+    ## Fetch checkpatch.pl and its data files into CHECKPATCH_PATH.
+    ## Exits the whole script (status 1) if curl is missing or any download
+    ## fails (see download_checkpatch_file).
+    echo "Updating checkpatch..."
+    if ! [ -x "$(command -v curl)" ]; then
+        app_err "curl was not found. Please, make sure curl command is available"
+        exit 1
+    fi
+
+    pushd $CHECKPATCH_PATH > /dev/null
+    #Execute popd when shell exits.
+    trap popd 0
+
+    # Download checkpatch.pl (must stay executable for direct invocation)
+    download_checkpatch_file checkpatch.pl
+    chmod 750 $CHECKPATCH_PATH/checkpatch.pl
+
+    # Download const_structs.checkpatch
+    download_checkpatch_file const_structs.checkpatch
+    chmod 640 $CHECKPATCH_PATH/const_structs.checkpatch
+
+    # Download spelling.txt
+    download_checkpatch_file spelling.txt
+    chmod 640 $CHECKPATCH_PATH/spelling.txt
+
+    popd >/dev/null
+    #Remove cleanup trap
+    trap 0
+}
+
+##@fn check_tree()
+##@brief Run checkpatch in directory tree checking mode
+##@returns status code
+##
+##Execute checkpatch to check the full content of all source files under the
+##directory specified in \ref TFM_DIRECTORY_NAME. Directory content specified in
+##\ref SKIP_PATHS will be excluded.
+##This function uses xargs to execute multiple checkpatch instances in parallel.
+##
+#This is needed for Doxygen for now.
+#!void check_tree(){};
+check_tree() {
+    ## Run "checkpatch -f" over every .c/.h file under TFM_DIRECTORY_NAME,
+    ## excluding SKIP_PATHS, appending all output to OUTPUT_FILE_PATH.
+    # Find all files to execute checkpatch on. Each ':' in SKIP_PATHS is
+    # rewritten into " -o -path " to build the find exclusion expression.
+    FIND_CMD="find $TFM_DIRECTORY_NAME -name '*.[ch]' -a -not \( -path "${SKIP_PATHS//:/ -o -path }" \)"
+    echo "Scanning "$TFM_DIRECTORY_NAME" dir to find all .c and .h files to check ..."
+    #Modify checkpatch command line to make checkpatch work on files.
+    CHECKPATCH_CMD="$CHECKPATCH_CMD -f "
+    # 8 checkpatch instances run in parallel, one file per invocation.
+    # NOTE(review): "xargs -i" is a deprecated spelling of "-I {}"; kept
+    # as-is for behavioural parity.
+    if [ $VERBOSE -eq 1 ]; then
+        eval "$FIND_CMD" | xargs -n 1 -i -P 8 $CHECKPATCH_CMD {} |tee -a "$OUTPUT_FILE_PATH"
+    else
+        eval "$FIND_CMD" | xargs -n 1 -i -P 8 $CHECKPATCH_CMD {} >> $OUTPUT_FILE_PATH
+    fi
+}
+
+##@fn check_diff()
+##@brief Run checkpatch in git diff mode.
+##@returns status code
+##
+##Execute checkpatch to check the last N (\ref CHECK_LAST_COMMITS) commits of
+##the branch checked out at directory specified in \ref TFM_DIRECTORY_NAME.
+##Directory content specified in \ref SKIP_PATHS will be excluded.
+##
+#This is needed for Doxygen for now.
+#!void check_diff(){};
+check_diff() {
+    ## Run checkpatch over the diff of the last CHECK_LAST_COMMITS commits
+    ## of the working copy at TFM_DIRECTORY_NAME, appending the output to
+    ## OUTPUT_FILE_PATH. Files matching SKIP_PATHS are excluded.
+    BASE_COMMIT="HEAD~$CHECK_LAST_COMMITS"
+    #use find to limit diff content to the same set of files as when checking
+    #the whole tree.
+    FIND_CMD="find ./ -name '*.[ch]' -a -not \( -path "${SKIP_PATHS//:/ -o -path }" \)"
+
+    #enter tf-m working copy to make git commands execute fine
+    pushd "$TFM_DIRECTORY_NAME" > /dev/null
+    #Execute popd when shell exits
+    trap popd 0
+
+    #List of files we care about. Filter out changed files from interesting
+    #list of files. This is needed to avoid GIT_CMD to break the argument
+    #list length.
+    # NOTE(review): the changed-file names are used as grep regex patterns,
+    # not fixed strings, so names with regex metacharacters may over-match;
+    # confirm this is acceptable.
+    CARE_LIST=$(eval $FIND_CMD | grep "$(git diff $BASE_COMMIT --name-only)" -)
+    GIT_CMD="git diff $BASE_COMMIT -- $CARE_LIST"
+
+    echo "Checking commits: $(git log "$BASE_COMMIT"..HEAD --format=%h | tr $"\n" " ")"
+
+    #Modify checkpatch parameters to give more details when working on
+    #diff:s ("-" makes checkpatch read the diff from stdin)
+    CHECKPATCH_CMD="$CHECKPATCH_CMD --showfile -"
+
+    if [ $VERBOSE -eq 1 ]; then
+        $GIT_CMD | $CHECKPATCH_CMD | tee -a "$OUTPUT_FILE_PATH"
+    else
+        $GIT_CMD | $CHECKPATCH_CMD >> $OUTPUT_FILE_PATH
+    fi
+
+    popd > /dev/null
+    #Remove cleanup trap.
+    trap 0
+}
+
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#~~~~~~~~~~~~~~~~~~~~~~~~ Entry point ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#Internal variables not to be modified.
+VERBOSE=0
+UPDATE_CHECKPATCH_FILES=0
+
+##@var CHECK_LAST_COMMITS
+##@brief Number of commits to check.
+##
+##Number of commits relative to HEAD to check. When set to 0 full file content
+##is checked instead of commit diffs.
+##
+#This is needed for Doxygen for now.
+#!path CHECK_LAST_COMMITS;
+CHECK_LAST_COMMITS=0
+
+# Getting options and setting variables required to execute the script. This
+# script starts executing from here.
+while getopts "uvhd:f:l:p:" opt
+do
+    case $opt in
+        v) VERBOSE=1 ;;
+        h) usage ; exit 0 ;;
+        d) TFM_DIRECTORY_NAME="$OPTARG" ;;
+        f) OUTPUT_FILE_PATH="$OPTARG" ;;
+        u) UPDATE_CHECKPATCH_FILES=1 ;;
+        l) CHECK_LAST_COMMITS="$OPTARG" ;;
+        p) CHECKPATCH_PATH="$OPTARG" ;;
+        \?) usage ; exit 1 ;;
+    esac
+done
+
+# Update checkpatch files. -u is exclusive: the script exits afterwards
+# without running any checks.
+if [ $UPDATE_CHECKPATCH_FILES -eq 1 ]; then
+    update_checkpatch
+    echo "Checkpatch update was successfull."
+    exit 0
+fi
+
+#Convert checkpath override path to full path
+CHECKPATCH_PATH=$(readlink -f "$CHECKPATCH_PATH")
+
+#Convert output file name to full path
+OUTPUT_FILE_PATH=$(readlink -f "$OUTPUT_FILE_PATH")
+
+# Create checkpatch command. The non-comment lines of checkpatch.conf become
+# extra command line options for checkpatch.pl.
+# NOTE(review): the config file is always read from the default location
+# (CHECKPATCH_PATH_DEF) even when -p overrides CHECKPATCH_PATH -- confirm
+# this is intentional.
+CHECKPATCH_APP=$CHECKPATCH_PATH"/checkpatch.pl"
+CHECKPATCH_CONFG_FILENAME=$CHECKPATCH_PATH_DEF"/checkpatch.conf"
+CHECKPATCH_CMD=$CHECKPATCH_APP" $(grep -o '^[^#]*' $CHECKPATCH_CONFG_FILENAME)"
+
+# Check if checkpatch is present
+if ! [ -f "$CHECKPATCH_APP" ]; then
+    app_err "checkpatch.pl was not found. checkpatch.pl has to be located in $CHECKPATCH_PATH"
+    exit 1
+fi
+
+#Truncate previous content
+: > $OUTPUT_FILE_PATH
+
+#Do we need to work on a git diff?
+if [ $CHECK_LAST_COMMITS -eq 0 ]
+then
+    #Working on files
+    check_tree
+else
+    #Working on git diff
+    check_diff
+fi
+
+echo "checkpatch report \"$OUTPUT_FILE_PATH\" is ready!"
diff --git a/run-cppcheck.sh b/run-cppcheck.sh
new file mode 100755
index 0000000..59a35e5
--- /dev/null
+++ b/run-cppcheck.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+##
+##@file
+##@brief Execute cppcheck
+##
+##This bash script can be used to execute cppcheck for the tf-m project.
+##It will use the CMake generated "compile_commands.json" file.
+##CMake is executed to generate the build commands for the "default" build
+##configuration (i.e. no build config file is specified on the command-line).
+##
+##This file shall be executed from the root directory of the tf-m working copy.
+##
+##In order to have all include file in place, some CMake external projects will
+##be built, and thus C build tools for the default build configuration must be
+##available.
+##
+##The script will generate two XML output files:
+##file | description
+##--------|--------
+##chk-config.xml | The result of cppcheck configuration verification.
+##chk-src.xml. | The result of source file verification.
+##
+##@todo The current version of cppcheck seems to ignore command line parameters
+## when using the --project command line switch. As a result it is not
+## possible to define additional macros and include paths on the command
+## line. This results in some incorrect error and warning messages.
+##@todo The file cppcheck/arm-cortex-m.cfg needs to be revised. Some settings
+## might be invalid, and also a different file may be needed based on
+## used compiler switches (i.e. to match width specification and or default
+## sign for some types).
+##@todo Currently cppcheck is only executed for the default build configuration
+## "ConfigDefault.cmake"for target AN521 of the "top level" project.
+## This might need to be reviewed/changed in the future.
+##
+
+#Fail if any command exit with error.
+set -e
+
+#The location from where the script executes
+mypath=$(dirname $0)
+
+#Helper functions for cmake project handling (fix_win_path, get_full_path,
+#generate_project, make_build_dir_name).
+. "$mypath/util_cmake.sh"
+
+
+#Library file for cppcheck
+library_file="$(fix_win_path $(get_full_path $mypath))/cppcheck/arm-cortex-m.cfg"
+suppress_file="$(fix_win_path $(get_full_path $mypath))/cppcheck/tfm-suppress-list.txt"
+
+#Run cmake to get the compile_commands.json file
+echo
+echo '******* Generating compile_commandas.json ***************'
+echo
+generate_project $(fix_win_path $(get_full_path ./)) "./" "cppcheck" "-DCMAKE_EXPORT_COMPILE_COMMANDS=1 -DTARGET_PLATFORM=AN521 -DCOMPILER=GNUARM"
+#Enter the build directory
+bdir=$(make_build_dir_name "./" "cppcheck")
+pushd "$bdir" >/dev/null
+#Build the external projects to get all headers installed to places from where
+#tf-m code uses them
+echo
+echo '******* Install external projects to their final place ***************'
+echo
+make -j mbedtls_sst_lib_install mbedtls_mcuboot_lib_install
+
+#Now run cppcheck.
+#--check-config only verifies that includes/configuration are resolvable;
+#the real analysis is the second invocation below. Both write their XML
+#report to stderr, hence the 2> redirections.
+echo
+echo '******* checking cppcheck configuration ***************'
+echo
+cppcheck --xml -j 4 --check-config --enable=all --library="$library_file" --project=compile_commands.json --suppressions-list="$suppress_file" --inline-suppr 2>chk-config.xml
+
+echo
+echo '******* analyzing files with cppcheck ***************'
+echo
+cppcheck --xml -j 4 --enable=all --library="$library_file" --project=compile_commands.json --suppressions-list="$suppress_file" --inline-suppr 2>chk-src.xml
+popd
+
+echo
+echo '******* Please check chk-config.xml and chk-src.xml for the results. ***************'
+echo
diff --git a/tfm_ci_pylib/__init__.py b/tfm_ci_pylib/__init__.py
new file mode 100644
index 0000000..0ea14cc
--- /dev/null
+++ b/tfm_ci_pylib/__init__.py
@@ -0,0 +1,12 @@
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+
+# Modules re-exported on "from tfm_ci_pylib import *".
+# NOTE(review): lava_rpc_connector and structured_task also live in this
+# package but are not listed -- confirm whether the omission is intentional.
+__all__ = ["tfm_builder",
+           "tfm_build_manager",
+           "utils"]
diff --git a/tfm_ci_pylib/lava_rpc_connector.py b/tfm_ci_pylib/lava_rpc_connector.py
new file mode 100644
index 0000000..269cbbf
--- /dev/null
+++ b/tfm_ci_pylib/lava_rpc_connector.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+
+""" lava_rpc_connector.py:
+
+ class that extends xmlrpc in order to add LAVA specific functionality.
+ Used in managing communication with the back-end. """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import xmlrpc.client
+import time
+
+
+class LAVA_RPC_connector(xmlrpc.client.ServerProxy, object):
+    """XML-RPC proxy with convenience helpers for a LAVA scheduler back-end.
+
+    Credentials are embedded into the server URL. Any attribute not defined
+    here (e.g. self.scheduler, self.results, self.system) is resolved by
+    ServerProxy into a remote RPC call.
+    """
+
+    def __init__(self,
+                 username,
+                 token,
+                 hostname,
+                 rest_prefix="RPC2",
+                 https=False):
+        """Build the authenticated server address and initialise the proxy.
+
+        hostname may optionally carry an "http://"/"https://" prefix; if it
+        does, that prefix takes precedence over the `https` flag.
+        """
+
+        # If user provides hostname with http/s prefix
+        if "://" in hostname:
+            htp_pre, hostname = hostname.split("://")
+            server_addr = "%s://%s:%s@%s/%s" % (htp_pre,
+                                                username,
+                                                token,
+                                                hostname,
+                                                rest_prefix)
+            self.server_url = "%s://%s" % (htp_pre, hostname)
+        else:
+            server_addr = "%s://%s:%s@%s/%s" % ("https" if https else "http",
+                                                username,
+                                                token,
+                                                hostname,
+                                                rest_prefix)
+            self.server_url = "%s://%s" % ("https" if https else "http",
+                                           hostname)
+
+        # Template used to build human-readable web links for jobs.
+        self.server_job_prefix = "%s/scheduler/job/%%s" % self.server_url
+        super(LAVA_RPC_connector, self).__init__(server_addr)
+
+    def _rpc_cmd_raw(self, cmd, params=None):
+        """ Run a remote command and return the result. There is no
+        constraint check on the syntax of the command. """
+
+        # SECURITY NOTE(review): cmd/params are interpolated into eval();
+        # only call this with trusted, internally generated strings.
+        cmd = "self.%s(%s)" % (cmd, params if params else "")
+        return eval(cmd)
+
+    def ls_cmd(self):
+        """ Print a list of RPC methods supported by the server """
+
+        print("\n".join(self.system.listMethods()))
+
+    def get_job_results(self, job_id, yaml_out_file=None):
+        """ Return the YAML results of job_id, optionally also writing them
+        to yaml_out_file """
+        results = self.results.get_testjob_results_yaml(job_id)
+        if yaml_out_file:
+            with open(yaml_out_file, "w") as F:
+                F.write(results)
+        return results
+
+    def get_job_state(self, job_id):
+        """ Return the server-side state string of job_id """
+        return self.scheduler.job_state(job_id)["job_state"]
+
+    def get_job_status(self, job_id):
+        """ Return the server-side status string of job_id """
+        return self.scheduler.job_status(job_id)["job_status"]
+
+    def cancel_job(self, job_id):
+        """ Cancel job with id=job_id. Returns True if successful """
+
+        return self.scheduler.jobs.cancel(job_id)
+
+    def validate_job_yaml(self, job_definition, print_err=False):
+        """ Validate a job definition syntax. Returns True if the server
+        considers the syntax valid """
+
+        try:
+            with open(job_definition) as F:
+                input_yaml = F.read()
+            self.scheduler.validate_yaml(input_yaml)
+            return True
+        except Exception as E:
+            if print_err:
+                print(E)
+            return False
+
+    def submit_job(self, job_definition):
+        """ Will submit a yaml definition pointed by job_definition after
+        validating it against the remote backend. Returns resulting job id,
+        and server url for job; (None, None) on failure """
+
+        try:
+            if not self.validate_job_yaml(job_definition):
+                print("Served rejected job's syntax")
+                raise Exception("Invalid job")
+            with open(job_definition, "r") as F:
+                job_data = F.read()
+        except Exception as e:
+            print("Cannot submit invalid job. Check %s's content" %
+                  job_definition)
+            print(e)
+            return None, None
+
+        job_id = self.scheduler.submit_job(job_data)
+        job_url = self.server_job_prefix % job_id
+        return(job_id, job_url)
+
+    def resubmit_job(self, job_id):
+        """ Re-submit job with provided id. Returns resulting job id,
+        and server url for job"""
+
+        job_id = self.scheduler.resubmit_job(job_id)
+        job_url = self.server_job_prefix % job_id
+        return(job_id, job_url)
+
+    def block_wait_for_job(self, job_id, timeout, poll_freq=1):
+        """ Will block code execution and wait for the job to complete,
+        polling every poll_freq seconds up to timeout seconds.
+        Returns job status on completion (or at timeout) """
+
+        start_t = int(time.time())
+        while(True):
+            cur_t = int(time.time())
+            if cur_t - start_t >= timeout:
+                print("Breaking because of timeout")
+                break
+            # Check if the job is not running
+            cur_status = self.get_job_status(job_id)
+            # If in queue or running wait
+            if cur_status == "Running" or cur_status == "Submitted":
+                time.sleep(poll_freq)
+            else:
+                break
+        return self.get_job_status(job_id)
+
+    def test_credentials(self):
+        """ Attempt to query the back-end and verify that the user provided
+        authentication is valid """
+
+        try:
+            self._rpc_cmd_raw("system.listMethods")
+            return True
+        except Exception as e:
+            print(e)
+            print("Credential validation failed")
+            return False
+
+
+if __name__ == "__main__":
+ pass
diff --git a/tfm_ci_pylib/structured_task.py b/tfm_ci_pylib/structured_task.py
new file mode 100644
index 0000000..b97cae9
--- /dev/null
+++ b/tfm_ci_pylib/structured_task.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+
+""" structured_task.py:
+
+ A generic abstraction class for executing a task with prerequisites and
+ post execution action """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import abc
+import time
+import multiprocessing
+
+
+class structuredTask(multiprocessing.Process):
+    """ A class that defines well structured chained execution of commands.
+
+    Life-cycle: pre_exec(pre_eval()) runs in the caller's process inside
+    __init__; task_exec() and post_exec(post_eval()) run in the child
+    process inside run(). Data crosses stages/processes via stash/unstash
+    (a Manager dict) and the overall result via set_status/get_status. """
+
+    __metaclass__ = abc.ABCMeta
+
+    def __init__(self, name):
+
+        # Event used to request the running task to stop.
+        self._stopevent = multiprocessing.Event()
+        self._exec_sleep_period = 1.0
+        self._join_timeout = 1.0
+        self._exec_timeout = 0.0
+        self._task_name = name
+
+        # multiprocessing safe shared memory variables
+        self._mprc_manager = multiprocessing.Manager()
+
+        # Dictionary used to store objects between stages
+        self._mprc_stash = self._mprc_manager.dict()
+
+        # Integer variable that stores status of flow
+        # (initialised with False, stored as int 0)
+        self._mprc_status = multiprocessing.Value('i', False)
+        super(structuredTask, self).__init__(name=name)
+
+        # Perform initialization
+        # If user code raises exception, class memory will not be allocated
+        # Variables can be safely shared in the pre stages, use stash for
+        # next stages
+        self.pre_exec(self.pre_eval())
+
+    # Class API/Interface
+
+    @abc.abstractmethod
+    def pre_eval(self):
+        """ Tests that need to be run in set-up state """
+
+    @abc.abstractmethod
+    def pre_exec(self, eval_ret):
+        """ Tasks that set-up execution enviroment """
+
+    @abc.abstractmethod
+    def task_exec(self):
+        """ Main tasks """
+
+    @abc.abstractmethod
+    def post_eval(self, eval_ret):
+        """ Tests that need to be run after main task """
+
+    @abc.abstractmethod
+    def post_exec(self):
+        """ Tasks that are run after main task """
+
+    def stash(self, key, data):
+        """ Store object in a shared memory interface """
+
+        self._mprc_stash[key] = data
+
+    def unstash(self, key):
+        """ Retrieve object from a shared memory interface.
+        Returns None when the key has never been stashed """
+
+        try:
+            return self._mprc_stash[key]
+        except KeyError:
+            return None
+
+    def get_name(self):
+        """ Return name label of class """
+        return self._task_name
+
+    def get_status(self):
+        """ Return the status of the execution flow (0 == success) """
+        with self._mprc_status.get_lock():
+            return self._mprc_status.value
+
+    def set_status(self, status):
+        """ Set the status of the execution flow (0 == success) """
+        with self._mprc_status.get_lock():
+            self._mprc_status.value = status
+
+    def run(self):
+        try:
+
+            # Run Core code
+            # NOTE(review): the unconditional break makes this a single-shot
+            # execution; the while only honours a stop event raised before
+            # the first iteration -- confirm this is the intent.
+            while not self._stopevent.is_set():
+                self.task_exec()
+                time.sleep(self._exec_sleep_period)
+                break
+            # print("Stop Event Detected")
+            # TODO Upgrade reporting to a similar format
+            print("%s ==> Stop Event Detected" % self.get_name())
+
+            # Post stage
+            # If something fails in post the user should set the correct status
+            self.set_status(0)
+            print("%s ==> Stop Event Set OK Status" % self.get_name())
+        except Exception as exc:
+            print(("ERROR: Stopping %s "
+                   "with Exception: \"%s\"") % (self.get_name(), exc))
+            self.set_status(1)
+        # Always call post, and determine success failed by get_status
+        self.post_exec(self.post_eval())
+
+    def _t_stop(self):
+        """ Internal class stop to be called through thread """
+        print("Thead is alive0 %s" % self.is_alive())
+        if(self.is_alive()):
+            print("%s =========> STOP" % self.get_name())
+            self._stopevent.set()
+            print("Thead is alive %s" % self.is_alive())
+            print("Stop Event Triggered")
+
+    def stop(self):
+        """ External stop to be called by user code """
+
+        self._t_stop()
+        super(structuredTask, self).join(self._join_timeout)
diff --git a/tfm_ci_pylib/tfm_build_manager.py b/tfm_ci_pylib/tfm_build_manager.py
new file mode 100644
index 0000000..dcf75de
--- /dev/null
+++ b/tfm_ci_pylib/tfm_build_manager.py
@@ -0,0 +1,260 @@
+#!/usr/bin/env python3
+
+""" tfm_build_manager.py:
+
+ Controlling class managing multiple build configurations for tfm """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import os
+import sys
+from pprint import pprint
+from copy import deepcopy
+from .utils import gen_cfg_combinations, list_chunks, load_json,\
+ save_json, print_test
+from .structured_task import structuredTask
+from .tfm_builder import TFM_Builder
+
+
+class TFM_Build_Manager(structuredTask):
+    """ Class that will load a configuration out of a json file, schedule
+    the builds, and produce a report.
+
+    Builds run as TFM_Builder child processes in slices of `parallel_builds`;
+    per-build status and reports are collected through the structuredTask
+    stash and summarised in post_exec. """
+
+    def __init__(self,
+                 tfm_dir,   # TFM root directory
+                 work_dir,  # Current working directory(ie logs)
+                 cfg_dict,  # Input config dictionary of the following form
+                            # input_dict = {"PROJ_CONFIG": "ConfigRegression",
+                            #               "TARGET_PLATFORM": "MUSCA_A",
+                            #               "COMPILER": "ARMCLANG",
+                            #               "CMAKE_BUILD_TYPE": "Debug"}
+                 report=None,        # File to produce report
+                 parallel_builds=3,  # Number of builds to run in parallel
+                 build_threads=4,    # Number of threads used per build
+                 markdown=True,      # Create markdown report
+                 html=True,          # Create html report
+                 ret_code=True,      # Set ret_code of script if build failed
+                 install=False):     # Install libraries after build
+
+        self._tbm_build_threads = build_threads
+        self._tbm_conc_builds = parallel_builds
+        self._tbm_install = install
+        self._tbm_markdown = markdown
+        self._tbm_html = html
+        self._tbm_ret_code = ret_code
+
+        # Required by other methods, always set working directory first
+        self._tbm_work_dir = os.path.abspath(os.path.expanduser(work_dir))
+
+        self._tbm_tfm_dir = os.path.abspath(os.path.expanduser(tfm_dir))
+
+        # Entries will be filled after sanity test on cfg_dict during pre_exec
+        self._tbm_build_dir = None
+        self._tbm_report = report
+
+        # TODO move them to pre_eval
+        self._tbm_cfg = self.load_config(cfg_dict, self._tbm_work_dir)
+        self._tbm_build_cfg_list = self.parse_config(self._tbm_cfg)
+
+        super(TFM_Build_Manager, self).__init__(name="TFM_Build_Manager")
+
+    def pre_eval(self):
+        """ Tests that need to be run in set-up state """
+        return True
+
+    def pre_exec(self, eval_ret):
+        """ No set-up work needed; config loading happens in __init__ """
+
+    def task_exec(self):
+        """ Create a build pool and execute them in parallel """
+
+        build_pool = []
+        for i in self._tbm_build_cfg_list:
+
+            name = "%s_%s_%s_%s_%s" % (i.TARGET_PLATFORM,
+                                       i.COMPILER,
+                                       i.PROJ_CONFIG,
+                                       i.CMAKE_BUILD_TYPE,
+                                       "BL2" if i.WITH_MCUBOOT else "NOBL2")
+            print("Loading config %s" % name)
+            build_pool.append(TFM_Builder(name,
+                                          self._tbm_tfm_dir,
+                                          self._tbm_work_dir,
+                                          dict(i._asdict()),
+                                          self._tbm_install,
+                                          self._tbm_build_threads))
+
+        status_rep = {}
+        full_rep = {}
+        print("Build: Running %d parallel build jobs" % self._tbm_conc_builds)
+        for build_pool_slice in list_chunks(build_pool, self._tbm_conc_builds):
+
+            # Start the builds
+            for build in build_pool_slice:
+                # Only produce output for the first build
+                if build_pool_slice.index(build) != 0:
+                    build.mute()
+                print("Build: Starting %s" % build.get_name())
+                build.start()
+
+            # Wait for the builds to complete
+            for build in build_pool_slice:
+                # Wait for build to finish
+                build.join()
+                # Similarly print the logs of the other builds as they complete
+                if build_pool_slice.index(build) != 0:
+                    build.log()
+                print("Build: Finished %s" % build.get_name())
+
+                # Store status in report
+                status_rep[build.get_name()] = build.get_status()
+                full_rep[build.get_name()] = build.report()
+        # Store the report for post_eval/post_exec (different process)
+        self.stash("Build Status", status_rep)
+        self.stash("Build Report", full_rep)
+
+        if self._tbm_report:
+            print("Exported build report to file:", self._tbm_report)
+            save_json(self._tbm_report, full_rep)
+
+    def post_eval(self):
+        """ If a single build failed fail the test """
+        try:
+            retcode_sum = sum(self.unstash("Build Status").values())
+            if retcode_sum != 0:
+                raise Exception()
+            return True
+        except Exception as e:
+            return False
+
+    def post_exec(self, eval_ret):
+        """ Generate a report and fail the script if build == unsuccessfull"""
+
+        self.print_summary()
+        if not eval_ret:
+            print("ERROR: ====> Build Failed! %s" % self.get_name())
+            self.set_status(1)
+        else:
+            print("SUCCESS: ====> Build Complete!")
+            self.set_status(0)
+
+    def get_report(self):
+        """ Expose the internal report to a new object for external classes """
+        return deepcopy(self.unstash("Build Report"))
+
+    def print_summary(self):
+        """ Print a comprehensive list of the build jobs with their status """
+
+        full_rep = self.unstash("Build Report")
+
+        # Filter out build jobs based on status
+        fl = ([k for k, v in full_rep.items() if v['status'] == 'Failed'])
+        ps = ([k for k, v in full_rep.items() if v['status'] == 'Success'])
+
+        print_test(t_list=fl, status="failed", tname="Builds")
+        print_test(t_list=ps, status="passed", tname="Builds")
+
+    def gen_cfg_comb(self, platform_l, compiler_l, config_l, build_l, boot_l):
+        """ Generate all possible configuration combinations from a group of
+        lists of compiler options"""
+        return gen_cfg_combinations("TFM_Build_CFG",
+                                    ("TARGET_PLATFORM COMPILER PROJ_CONFIG"
+                                     " CMAKE_BUILD_TYPE WITH_MCUBOOT"),
+                                    platform_l,
+                                    compiler_l,
+                                    config_l,
+                                    build_l,
+                                    boot_l)
+
+    def load_config(self, config, work_dir):
+        """ Accept a configuration as a dict, or as a path to a json file
+        (looked up as-is, then inside work_dir). Exits the process on
+        failure. Returns the configuration dictionary. """
+        try:
+            # passing a dict directly supersedes the file parameter
+            if isinstance(config, dict):
+                ret_cfg = deepcopy(config)
+            elif isinstance(config, str):
+                # If the string does not describe a file try to look for it in
+                # work directory
+                if not os.path.isfile(config):
+                    # remove path from file
+                    config_2 = os.path.split(config)[-1]
+                    # look in the current working directory
+                    config_2 = os.path.join(work_dir, config_2)
+                    if not os.path.isfile(config_2):
+                        m = "Could not find cfg in %s or %s " % (config,
+                                                                 config_2)
+                        raise Exception(m)
+                    # If file exists in working directory
+                    else:
+                        config = config_2
+                ret_cfg = load_json(config)
+
+            else:
+                raise Exception("Need to provide a valid config name or file."
+                                "Please use --config/--config-file parameter.")
+        except Exception as e:
+            print("Error:%s \nCould not load a valid config" % e)
+            sys.exit(1)
+
+        pprint(ret_cfg)
+        return ret_cfg
+
+    def parse_config(self, cfg):
+        """ Parse a valid configuration file into a set of build dicts """
+
+        # Generate a list of all possible configuration combinations
+        full_cfg = self.gen_cfg_comb(cfg["platform"],
+                                     cfg["compiler"],
+                                     cfg["config"],
+                                     cfg["build"],
+                                     cfg["with_mcuboot"])
+
+        # Generate a list of all invalid combinations
+        rejection_cfg = []
+
+        for k in cfg["invalid"]:
+            # Pad the omitted values with wildcard char *
+            res_list = list(k) + ["*"] * (5 - len(k))
+
+            print("Working on rejection input: %s" % (res_list))
+
+            # Key order matters. Use index to retrieve default values When
+            # wildcard * char is present
+            _cfg_keys = ["platform",
+                         "compiler",
+                         "config",
+                         "build",
+                         "with_mcuboot"]
+
+            # Replace wildcard ( "*") entries with every included cfg variant
+            for n in range(len(res_list)):
+                res_list[n] = [res_list[n]] if res_list[n] != "*" \
+                    else cfg[_cfg_keys[n]]
+
+            rejection_cfg += self.gen_cfg_comb(*res_list)
+
+        # Notify the user for the rejected configurations
+        for i in rejection_cfg:
+
+            name = "%s_%s_%s_%s_%s" % (i.TARGET_PLATFORM,
+                                       i.COMPILER,
+                                       i.PROJ_CONFIG,
+                                       i.CMAKE_BUILD_TYPE,
+                                       "BL2" if i.WITH_MCUBOOT else "NOBL2")
+            print("Rejecting config %s" % name)
+
+        # Subtract the two lists and convert to dictionary
+        return list(set(full_cfg) - set(rejection_cfg))
diff --git a/tfm_ci_pylib/tfm_builder.py b/tfm_ci_pylib/tfm_builder.py
new file mode 100644
index 0000000..07ed776
--- /dev/null
+++ b/tfm_ci_pylib/tfm_builder.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+
+""" tfm_builder.py:
+
+ Build wrapping class that builds a specific tfm configuration """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import os
+from .utils import *
+import shutil
+from .structured_task import structuredTask
+
+
+class TFM_Builder(structuredTask):
+    """ Wrap around tfm cmake system and spawn a thread to build the project.
+    """
+    # Keys that every input config dictionary must provide (see pre_eval)
+    _tfb_build_params = ["TARGET_PLATFORM",
+                         "COMPILER",
+                         "PROJ_CONFIG",
+                         "CMAKE_BUILD_TYPE",
+                         "WITH_MCUBOOT"
+                         ]
+
+    # cmake configure command, filled by %-interpolation with the config
+    # dictionary (plus the TFM_ROOT entry added in pre_exec)
+    _tfb_build_template = ("cmake -G \"Unix Makefiles\" -DPROJ_CONFIG=`"
+                           "readlink -f %(PROJ_CONFIG)s.cmake` "
+                           "-DTARGET_PLATFORM=%(TARGET_PLATFORM)s "
+                           "-DCOMPILER=%(COMPILER)s "
+                           "-DCMAKE_BUILD_TYPE=%(CMAKE_BUILD_TYPE)s "
+                           "-DBL2=%(WITH_MCUBOOT)s "
+                           "%(TFM_ROOT)s")
+
+    def __init__(self,
+                 name,            # Process name
+                 tfm_dir,         # TFM root directory
+                 work_dir,        # Current working directory(ie logs)
+                 cfg_dict,        # Input config dictionary of the following form
+                                  # input_dict = {"PROJ_CONFIG": "ConfigRegression",
+                                  #               "TARGET_PLATFORM": "MUSCA_A",
+                                  #               "COMPILER": "ARMCLANG",
+                                  #               "CMAKE_BUILD_TYPE": "Debug"}
+                 install=False,   # Install library after build
+                 build_threads=4,  # Number of CPU threads used in build
+                 silent=False):   # Silence stdout output
+
+        self._tfb_cfg = cfg_dict
+        self._tfb_build_threads = build_threads
+        self._tfb_install = install
+        self._tfb_silent = silent
+        # Expected build artefacts, populated in pre_exec
+        self._tfb_binaries = []
+
+        # Required by other methods, always set working directory first
+        self._tfb_work_dir = os.path.abspath(os.path.expanduser(work_dir))
+
+        self._tfb_tfm_dir = os.path.abspath(os.path.expanduser(tfm_dir))
+        # Entries will be filled after sanity test on cfg_dict during pre_exec
+        self._tfb_build_dir = None
+        self._tfb_log_f = None
+        super(TFM_Builder, self).__init__(name=name)
+
+    def mute(self):
+        """ Suppress stdout output for subsequent build steps """
+        self._tfb_silent = True
+
+    def log(self):
+        """ Print and return the contents of the build log file """
+        with open(self._tfb_log_f, "r") as F:
+            log = F.read()
+        print(log)
+        return log
+
+    def report(self):
+        """ Return the stashed report on the job """
+        return self.unstash("Build Report")
+
+ def pre_eval(self):
+ """ Tests that need to be run in set-up state """
+
+ # Test that all required entries exist in config
+ diff = list(set(self._tfb_build_params) - set(self._tfb_cfg.keys()))
+ if diff:
+ print("Cound't find require build entry: %s in config" % diff)
+ return False
+ # TODO check validity of passed config values
+ # TODO test detection of srec
+ # self.srec_path = shutil.which("srec_cat")
+ return True
+
+    def pre_exec(self, eval_ret):
+        """ Create all required directories, files if they do not exist.
+
+        Also computes self._tfb_binaries, the list of artefacts the build
+        is expected to produce, which post_eval later verifies on disk. """
+
+        self._tfb_build_dir = os.path.join(self._tfb_work_dir,
+                                           self.get_name())
+        # Ensure we have a clean build directory
+        shutil.rmtree(self._tfb_build_dir, ignore_errors=True)
+
+        self._tfb_cfg["TFM_ROOT"] = self._tfb_tfm_dir
+
+        # Append the path for the config
+        self._tfb_cfg["PROJ_CONFIG"] = os.path.join(self._tfb_tfm_dir,
+                                                    self._tfb_cfg[("PROJ_"
+                                                                   "CONFIG")])
+
+        # Log will be placed in work directory, named as the build dir
+        self._tfb_log_f = "%s.log" % self._tfb_build_dir
+
+        # Confirm that the work/build directory exists
+        for p in [self._tfb_work_dir, self._tfb_build_dir]:
+            if not os.path.exists(p):
+                os.makedirs(p)
+
+        # Calculate a list of expected binaries
+        binaries = []
+
+        # If install is asserted pick the items from the appropriate location
+        if self._tfb_install:
+
+            fvp_path = os.path.join(self._tfb_build_dir,
+                                    "install", "outputs", "fvp")
+            platform_path = os.path.join(self._tfb_build_dir,
+                                         "install",
+                                         "outputs",
+                                         self._tfb_cfg["TARGET_PLATFORM"])
+
+            # Generate a list of binaries included in both directories
+            common_bin_list = ["tfm_%s.%s" % (s, e) for s in ["s", "ns"]
+                               for e in ["bin", "axf"]]
+            if self._tfb_cfg["WITH_MCUBOOT"]:
+                common_bin_list += ["mcuboot.%s" % e for e in ["bin", "axf"]]
+
+                # When building with bootloader extra binaries are expected
+                binaries += [os.path.join(platform_path, b) for b in
+                             ["tfm_sign.bin",
+                              "tfm_full.bin"]]
+                binaries += [os.path.join(fvp_path, b) for b in
+                             ["tfm_s_ns_concatenated.bin",
+                              "tfm_s_ns_signed.bin"]]
+
+            binaries += [os.path.join(p, b) for p in [fvp_path, platform_path]
+                         for b in common_bin_list]
+
+            # Add Musca required binaries
+            if self._tfb_cfg["TARGET_PLATFORM"] == "MUSCA_A":
+                binaries += [os.path.join(platform_path,
+                                          "musca_firmware.hex")]
+
+            self._tfb_binaries = binaries
+
+        else:
+            binaries += [os.path.join(self._tfb_build_dir, "app", "tfm_ns")]
+            # Core-test configs place the secure image under unit_test/
+            if "ConfigCoreTest" in self._tfb_build_dir:
+                binaries += [os.path.join(self._tfb_build_dir,
+                                          "unit_test", "tfm_s")]
+            else:
+                binaries += [os.path.join(self._tfb_build_dir, "app",
+                                          "secure_fw", "tfm_s")]
+            if self._tfb_cfg["WITH_MCUBOOT"]:
+                binaries += [os.path.join(self._tfb_build_dir,
+                                          "bl2", "ext", "mcuboot", "mcuboot")]
+
+            # Each artefact stem is expected with both extensions
+            ext = ['.bin', '.axf']
+            self._tfb_binaries = ["%s%s" % (n, e) for n in binaries
+                                  for e in ext]
+
+            # Add Musca required binaries
+            if self._tfb_cfg["TARGET_PLATFORM"] == "MUSCA_A":
+                self._tfb_binaries += [os.path.join(self._tfb_build_dir,
+                                                    "tfm_sign.bin")]
+                self._tfb_binaries += [os.path.join(self._tfb_build_dir,
+                                                    "musca_firmware.hex")]
+
+ def get_binaries(self,
+ bootl=None,
+ bin_s=None,
+ bin_ns=None,
+ bin_sign=None,
+ filt=None):
+ """ Return the absolute location of binaries (from config)
+ if they exist. Can add a filter parameter which will only
+ consider entries with /filter/ in their path as a directory """
+ ret_boot = None
+ ret_bin_ns = None
+ ret_bin_s = None
+ ret_bin_sign = None
+
+ # Apply filter as a /filter/ string to the binary list
+ filt = "/" + filt + "/" if filter else None
+ binaries = list(filter(lambda x: filt in x, self._tfb_binaries)) \
+ if filt else self._tfb_binaries
+
+ for obj_file in binaries:
+ fname = os.path.split(obj_file)[-1]
+ if bootl:
+ if fname == bootl:
+ ret_boot = obj_file
+ continue
+ if bin_s:
+ if fname == bin_s:
+ ret_bin_s = obj_file
+ continue
+
+ if bin_ns:
+ if fname == bin_ns:
+ ret_bin_ns = obj_file
+ continue
+ if bin_sign:
+ if fname == bin_sign:
+ ret_bin_sign = obj_file
+ continue
+ return [ret_boot, ret_bin_s, ret_bin_ns, ret_bin_sign]
+
+    def task_exec(self):
+        """ Main tasks: configure with cmake, build, optionally install,
+        and post-process Musca-A images into a flashable hex file. """
+
+        # Mark process running as status
+        self.set_status(-1)
+        # Go to build directory
+        os.chdir(self._tfb_build_dir)
+        # Compile the build commands
+        cmake_cmd = self._tfb_build_template % self._tfb_cfg
+        build_cmd = "cmake --build ./ -- -j %s" % self._tfb_build_threads
+
+        # Pass the report to later stages
+        rep = {"build_cmd": "%s" % build_cmd,
+               "cmake_cmd": "%s" % cmake_cmd}
+        self.stash("Build Report", rep)
+
+        # Call cmake to configure the project.
+        # subprocess_log returns the exit code, so 0 (falsy) means success.
+        if not subprocess_log(cmake_cmd,
+                              self._tfb_log_f,
+                              prefix=cmake_cmd,
+                              silent=self._tfb_silent):
+            # Build it
+            if subprocess_log(build_cmd,
+                              self._tfb_log_f,
+                              append=True,
+                              prefix=build_cmd,
+                              silent=self._tfb_silent):
+                raise Exception("Build Failed please check log: %s" %
+                                self._tfb_log_f)
+        else:
+            raise Exception("Cmake Failed please check log: %s" %
+                            self._tfb_log_f)
+
+        if self._tfb_install:
+            install_cmd = "cmake --build ./ -- -j install"
+            if subprocess_log(install_cmd,
+                              self._tfb_log_f,
+                              append=True,
+                              prefix=install_cmd,
+                              silent=self._tfb_silent):
+                raise Exception(("Make install Failed."
+                                 " please check log: %s") % self._tfb_log_f)
+        # Musca-A boards boot from a single merged intel-hex image
+        if self._tfb_cfg["TARGET_PLATFORM"] == "MUSCA_A":
+            boot_f, s_bin, ns_bin, sns_signed_bin = self.get_binaries(
+                bootl="mcuboot.bin",
+                bin_s="tfm_s.bin",
+                bin_ns="tfm_ns.bin",
+                bin_sign="tfm_sign.bin",
+                filt="MUSCA_A")
+            self.convert_to_hex(boot_f, sns_signed_bin)
+        self._t_stop()
+
+    def sign_img(self, secure_bin, non_secure_bin):
+        """ Join a secure and non secure image and sign them.
+
+        Uses the mcuboot assemble.py/imgtool.py scripts from the TFM tree
+        and returns the path of the produced signed binary. """
+
+        imgtool_dir = os.path.join(self._tfb_tfm_dir,
+                                   "bl2/ext/mcuboot/scripts/")
+        flash_layout = os.path.join(self._tfb_tfm_dir,
+                                    "platform/ext/target/musca_a/"
+                                    "partition/flash_layout.h")
+        sign_cert = os.path.join(self._tfb_tfm_dir,
+                                 "bl2/ext/mcuboot/root-rsa-2048.pem")
+        sns_unsigned_bin = os.path.join(self._tfb_build_dir,
+                                        "sns_unsigned.bin")
+        sns_signed_bin = os.path.join(self._tfb_build_dir, "sns_signed.bin")
+
+        # Early versions of the tool had relative imports, run from its dir
+        os.chdir(imgtool_dir)
+        assemble_cmd = ("python3 assemble.py -l %(layout)s -s %(s)s "
+                        "-n %(ns)s -o %(sns)s") % {"layout": flash_layout,
+                                                   "s": secure_bin,
+                                                   "ns": non_secure_bin,
+                                                   "sns": sns_unsigned_bin
+                                                   }
+        sign_cmd = ("python3 imgtool.py sign -k %(cert)s --align 1 -v "
+                    "1.0 -H 0x400 --pad 0x30000 "
+                    "%(sns)s %(sns_signed)s") % {"cert": sign_cert,
+                                                 "sns": sns_unsigned_bin,
+                                                 "sns_signed": sns_signed_bin
+                                                 }
+        run_proccess(assemble_cmd)
+        run_proccess(sign_cmd)
+        # Return to build directory
+        os.chdir(self._tfb_build_dir)
+        return sns_signed_bin
+
+    def convert_to_hex(self,
+                       boot_bin,
+                       sns_signed_bin,
+                       qspi_base=0x200000,
+                       boot_size=0x10000):
+        """ Convert a signed image to an intel hex format with mcuboot.
+
+        Merges the bootloader at qspi_base and the signed image at
+        qspi_base + boot_size into a single hex file via srec_cat. """
+        if self._tfb_install:
+            platform_path = os.path.join(self._tfb_build_dir,
+                                         "install",
+                                         "outputs",
+                                         self._tfb_cfg["TARGET_PLATFORM"])
+            firmware_hex = os.path.join(platform_path, "musca_firmware.hex")
+        else:
+            firmware_hex = os.path.join(self._tfb_build_dir,
+                                        "musca_firmware.hex")
+
+        img_offset = qspi_base + boot_size
+        merge_cmd = ("srec_cat %(boot)s -Binary -offset 0x%(qspi_offset)x "
+                     "%(sns_signed)s -Binary -offset 0x%(img_offset)x "
+                     "-o %(hex)s -Intel") % {"boot": boot_bin,
+                                             "sns_signed": sns_signed_bin,
+                                             "hex": firmware_hex,
+                                             "qspi_offset": qspi_base,
+                                             "img_offset": img_offset
+                                             }
+        run_proccess(merge_cmd)
+        return
+
+    def post_eval(self):
+        """ Verify that the artefacts exist.
+
+        Checks every path in self._tfb_binaries on disk, updates the
+        stashed "Build Report" with artefacts/log/status, and returns
+        True only when all expected binaries were produced. """
+        print("%s Post eval" % self.get_name())
+
+        ret_eval = False
+        rep = self.unstash("Build Report")
+        missing_binaries = list(filter(lambda x: not os.path.isfile(x),
+                                       self._tfb_binaries))
+
+        if len(missing_binaries):
+            print("ERROR: Could not locate the following binaries:")
+            print("\n".join(missing_binaries))
+
+            # Update the artifacts to not include missing ones
+            artf = [n for n in self._tfb_binaries if n not in missing_binaries]
+            # TODO update self._tfb_binaries
+            ret_eval = False
+        else:
+            print("SUCCESS: Produced binaries:")
+            print("\n".join(self._tfb_binaries))
+            ret_eval = True
+
+            artf = self._tfb_binaries
+
+        # Add artefact related information to report
+        rep["log"] = self._tfb_log_f
+        rep["missing_artefacts"] = missing_binaries
+        rep["artefacts"] = artf
+
+        rep["status"] = "Success" if ret_eval else "Failed"
+        self.stash("Build Report", rep)
+        return ret_eval
+
+    def post_exec(self, eval_ret):
+        """ Report the final outcome of the build to the user """
+
+        if eval_ret:
+            print("TFM Builder %s was Successful" % self.get_name())
+        else:
+            print("TFM Builder %s was UnSuccessful" % self.get_name())
diff --git a/tfm_ci_pylib/utils.py b/tfm_ci_pylib/utils.py
new file mode 100755
index 0000000..7d1ca46
--- /dev/null
+++ b/tfm_ci_pylib/utils.py
@@ -0,0 +1,285 @@
+#!/usr/bin/env python3
+
+""" utils.py:
+
+ various simple and commonly used methods and classes shared by the scripts
+ in the CI environment """
+
+from __future__ import print_function
+
+__copyright__ = """
+/*
+ * Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ */
+ """
+__author__ = "Minos Galanakis"
+__email__ = "minos.galanakis@linaro.org"
+__project__ = "Trusted Firmware-M Open CI"
+__status__ = "stable"
+__version__ = "1.0"
+
+import os
+import sys
+import yaml
+import argparse
+import json
+import itertools
+from collections import OrderedDict, namedtuple
+from subprocess import Popen, PIPE, STDOUT
+
+
+def detect_python3():
+    """ Return true if script is run with Python3 interpreter """
+
+    # Tuple comparison: any 3.x version_info compares greater than (3, 0)
+    return sys.version_info > (3, 0)
+
+
+def print_test_dict(data_dict,
+                    pad_space=80,
+                    identation=5,
+                    titl="Summary",
+                    pad_char="*"):
+
+    """ Configurable print formatter aimed for dictionaries of the type
+    {"TEST NAME": "RESULT"} used in CI systems. It will also return
+    the string which is printed.
+
+    :param data_dict: Dictionary of test-name: result entries.
+    :param pad_space: Total width of the rendered table.
+    :param identation: Left indent per entry (parameter name kept as-is
+        for compatibility; "indentation" is meant).
+    :param titl: Title rendered in the header line.
+    :param pad_char: Character used to pad header and footer lines.
+    """
+
+    # Calculate pad space between variables x, y to achieve alignment on y
+    # taking into consideration a maximum alignment boundary p and
+    # possible indentation i
+    def flex_pad(x, y, p, i):
+        return " " * (p - i * 2 - len(x) - len(y)) + "-> "
+
+    # Calculate the padding for the dataset
+    tests = [k + flex_pad(k,
+                          v,
+                          pad_space,
+                          identation) + v for k, v in data_dict.items()]
+
+    # Add the indentation
+    tests = map(lambda x: " " * identation + x, tests)
+
+    # Convert to string
+    tests = "\n".join(tests)
+
+    # Calculate the top header padding, ceiling any rounding errors
+    hdr_pad = (pad_space - len(titl) - 3) / 2
+
+    # Python 3 true division yields a float; cast back for string repetition
+    if detect_python3():
+        hdr_pad = int(hdr_pad)
+
+    # Generate a print formatting dictionary
+    print_dict = {"pad0": pad_char * (hdr_pad),
+                  "pad1": pad_char * (hdr_pad + 1 if len(titl) % 2
+                                      else hdr_pad),
+                  "sumry": tests,
+                  "pad2": pad_char * pad_space,
+                  "titl": titl}
+
+    # Compose & print the report
+    r = "\n%(pad0)s %(titl)s %(pad1)s\n\n%(sumry)s\n\n%(pad2)s\n" % print_dict
+    print(r)
+    return r
+
+
+def print_test(t_name=None, t_list=None, status="failed", tname="Tests"):
+    """ Print a list of tests in a structured ascii table format.
+
+    :param t_name: Optional table title printed in a banner.
+    :param t_list: List of test names to render, one row each.
+    :param status: Status label printed above the rows.
+    :param tname: Collective noun used in the status line.
+    """
+
+    gfx_line1 = "=" * 80
+    gfx_line2 = "\t" + "-" * 70
+    if t_name:
+        print("%(line)s\n%(name)s\n%(line)s" % {"line": gfx_line1,
+                                                "name": t_name})
+    print("%s %s:" % (tname, status))
+    # 66 = row width inside the 70-char rule, minus borders/padding
+    print(gfx_line2 + "\n" +
+          "\n".join(["\t|  %(key)s%(pad)s|\n%(line)s" % {
+              "key": n,
+              "pad": (66 - len(n)) * " ",
+              "line": gfx_line2} for n in t_list]))
+
+
+def test(test_list,
+         test_dict,
+         test_name="TF-M Test",
+         pass_text=["PASSED", "PRESENT"],
+         error_on_failed=True,
+         summary=True):
+
+    """ Using input of a test_list and a test results dictionary in the format
+    of test_name: result key-value pairs, test() method will verify that every
+    single entry in the test_list has been tested and passed. Pass and Failed
+    status texts can be overridden, and the error_on_failed flag exits the
+    script with failure if a single test fails or is not detected. Returns a
+    json containing status and fields for each test passed/failed/missing, if
+    error on failed is not set.
+    """
+
+    t_report = {"name": test_name,
+                "success": None,
+                "passed": [],
+                "failed": [],
+                "missing": []}
+    # Clean-up tests that are not requested by test_list
+    test_dict = {k: v for k, v in test_dict.items() if k in test_list}
+
+    # Calculate the difference of the two sets to find missing tests
+    t_report["missing"] = list(set(test_list) - set(test_dict.keys()))
+
+    # Sort the items into the appropriate lists (failed or passed)
+    # based on their status.
+    for k, v in test_dict.items():
+        # print(k, v)
+        key = "passed" if v in pass_text else "failed"
+        t_report[key] += [k]
+
+    # For the test to pass every single test in test_list needs to be present
+    # and be in the passed list
+    if len(test_list) == len(t_report["passed"]):
+        t_report["success"] = True
+    else:
+        t_report["success"] = False
+
+    # Print a summary
+    if summary:
+        if t_report["passed"]:
+            print_test(test_name, t_report["passed"], status="passed")
+        if t_report["missing"]:
+            print_test(test_name, t_report["missing"], status="missing")
+        if t_report["failed"]:
+            print_test(test_name, t_report["failed"], status="Failed")
+
+    print("\nTest %s has %s!" % (t_report["name"],
+                                 " been successful" if t_report["success"]
+                                 else "failed"))
+    print("-" * 80)
+    if error_on_failed:
+        syscode = 0 if t_report["success"] else 1
+        sys.exit(syscode)
+    return t_report
+
+
+def save_json(f_name, data_object):
+    """ Save object to json file.
+
+    :param f_name: Destination file path (overwritten if it exists).
+    :param data_object: Any json-serializable object.
+    """
+
+    with open(f_name, "w") as F:
+        F.write(json.dumps(data_object, indent=2))
+
+
+def save_dict_json(f_name, data_dict, sort_list=None):
+    """ Save a dictionary object to file with optional sorting.
+
+    NOTE(review): when sort_list is None/empty NOTHING is written to disk —
+    looks unintentional; confirm against callers before relying on it. """
+
+    if sort_list:
+        data_object = (sort_dict(data_dict, sort_list))
+        save_json(f_name, data_object)
+
+
+def sort_dict(config_dict, sort_order_list=None):
+    """ Create a fixed order dictionary out of a config dataset.
+
+    When sort_order_list is given its key order is used verbatim,
+    otherwise keys are sorted alphabetically. """
+
+    if sort_order_list:
+        ret = OrderedDict([(k, config_dict[k]) for k in sort_order_list])
+    else:
+        ret = OrderedDict([(k, config_dict[k]) for k in sorted(config_dict)])
+    return ret
+
+
+def load_json(f_name):
+    """ Load object from json file.
+
+    Raises a generic Exception when the file cannot be parsed.
+    NOTE(review): the IOError handler is unreachable — open() sits outside
+    the try block, so open failures propagate unhandled. """
+
+    with open(f_name, "r") as F:
+        try:
+            return json.loads(F.read())
+        except ValueError as exc:
+            print("No JSON object could be decoded from file: %s" % f_name)
+        except IOError:
+            print("Error opening file: %s" % f_name)
+    raise Exception("Failed to load file")
+
+
+def load_yaml(f_name):
+    """ Load object from yaml file, raising on parse failure.
+
+    NOTE(review): yaml.load without an explicit Loader can construct
+    arbitrary objects — consider yaml.safe_load for untrusted input.
+    NOTE(review): as in load_json, the IOError handler is unreachable
+    because open() is outside the try block. """
+
+    # Parse command line arguments to override config
+    with open(f_name, "r") as F:
+        try:
+            return yaml.load(F.read())
+        except yaml.YAMLError as exc:
+            print("Error parsing file: %s" % f_name)
+        except IOError:
+            print("Error opening file: %s" % f_name)
+    raise Exception("Failed to load file")
+
+
+def subprocess_log(cmd, log_f, prefix=None, append=False, silent=False):
+ """ Run a command as subproccess an log the output to stdout and fileself.
+ If prefix is spefified it will be added as the first line in file """
+
+ with open(log_f, 'a' if append else "w") as F:
+ if prefix:
+ F.write(prefix + "\n")
+ pcss = Popen(cmd,
+ stdout=PIPE,
+ stderr=STDOUT,
+ shell=True,
+ env=os.environ)
+ for line in pcss.stdout:
+ if detect_python3():
+ line = line.decode("utf-8")
+ if not silent:
+ sys.stdout.write(line)
+ F.write(line)
+ pcss.communicate()
+ return pcss.returncode
+ return
+
+
+def run_proccess(cmd):
+    """ Run a command as a subprocess, discarding its output, and return
+    its exit code. (Unlike subprocess_log, nothing is echoed or logged.)
+
+    Note: the misspelled name is kept for compatibility with callers. """
+
+    pcss = Popen(cmd,
+                 stdout=PIPE,
+                 stderr=PIPE,
+                 shell=True,
+                 env=os.environ)
+    pcss.communicate()
+    return pcss.returncode
+
+
+def list_chunks(l, n):
+    """ Yield successive n-sized chunks from l (last chunk may be shorter). """
+
+    for i in range(0, len(l), n):
+        yield l[i:i + n]
+
+
+def export_config_map(config_m, dir=None):
+    """ Will export a dictionary of configurations to a group of JSON files.
+
+    One <name>.json file per entry is written to `dir` (defaults to the
+    current working directory). The `dir` parameter name shadows the
+    builtin but is kept for caller compatibility. """
+
+    _dir = dir if dir else os.getcwd()
+    for _cname, _cfg in config_m.items():
+        # File names are the lower-cased config names
+        _cname = _cname.lower()
+        _fname = os.path.join(_dir, _cname + ".json")
+        print("Exporting config %s" % _fname)
+        save_json(_fname, _cfg)
+
+
+def gen_cfg_combinations(name, categories, *args):
+    """ Create a list of named tuples of `name`, with elements defined in a
+    space separated string `categories` and an equal amount of lists for said
+    categories provided as arguments. Order of arguments should match the
+    order of the categories lists. Returns the full cartesian product. """
+
+    build_config = namedtuple(name, categories)
+    return [build_config(*x) for x in itertools.product(*args)]
+
+
+def get_cmd_args(descr="", parser=None):
+ """ Parse command line arguments """
+ # Parse command line arguments to override config
+
+ if not parser:
+ parser = argparse.ArgumentParser(description=descr)
+ return parser.parse_args()
diff --git a/util_cmake.sh b/util_cmake.sh
new file mode 100755
index 0000000..8895123
--- /dev/null
+++ b/util_cmake.sh
@@ -0,0 +1,299 @@
+#!/bin/bash
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+
+##
+##@file
+##@brief Common utility functions used by CMake related build and utility scripts
+##
+##This file can be "sourced" from other scripts to get access to variables and functions
+##defined here.
+##Example \code{.sh}. <path-to-tfm-ci-repo>/util_cmake.sh\endcode
+##or \code{.sh}source <path-to-tfm-ci-repo>/util_cmake.sh\endcode
+##
+
+##@fn fix_win_path(string path)
+##@brief Convert cygwin and msys path to windows like.
+##@param[in] path
+##@returns path in windows format
+##
+##This function converts MSYS and cygwin paths to windows like path. Can be used
+##to print paths in error message which can be used withouth conversion. This
+##way for example you can get "clickable" path in Eclipse error window.
+##
+##Usage:
+## Assuming current directory is <i>c:/somedir1/somedir2</i>
+## command | result
+## --------|-------
+## fix_win_path "/cygdrive/c/foo/bar"| c:/foo/bar
+## fix_win_path "/c/foo/bar"| c:/foo/bar
+## fix_win_path "../somedir1/foo/bar"| ../somedir1/foo/bar
+## fix_win_path `get_full_path "../somedir1/foo/bar"` | c:/somedir1/foo/bar
+##
+#This is needed for doxygen for now.
+#!void fix_win_path(string path){};
+#
+function fix_win_path() {
+    local path="$@"
+    #See if we run on windows (drive c: visible as a path)
+    if [ -e "c:/" ]
+    then
+        #sed:
+        #  1. match /cygdrive/c/ like paths and convert to the c:/ format
+        #  2. if 1 did not match convert /c/ path to c:/ format
+        path=`builtin echo "$path"|sed "s/\/cygdrive\/\([a-zA-Z]\)\//\1:\//;tx;s/\/\([a-zA-Z]\)\//\1:\//;:x"`
+    fi
+    builtin echo "$path"
+}
+
+##@fn get_full_path(string path)
+##@brief Convert the passed path to full path.
+##@param[in] path
+##@returns path converted to absolute full path.
+##
+##This function converts a path to absolute full path. The function will return
+##execution environment specific path (/cygdrive/ under Cygwin c:/ under MSys
+##and /foo/bar under Linux).
+##The path to convert may or may not contain a file name.
+##
+##Usage:
+## Assuming current directory is <i>c:/somedir1/somedir2</i>
+## environment | command | result
+## --------|--------|-------
+## Cygwin|get_full_path "."| /cygdrive/c/somedir1/somedir2
+## MSys|get_full_path "."| c:/somedir1/somedir2
+## Linux|get_full_path "."| /somedir1/somedir2
+##
+#This is needed for doxygen for now.
+#!void get_full_path(string path){};
+#
+function get_full_path {
+    local file=""
+    local dir=$1
+    #If the parameter is a file, split it to directory and file name component.
+    if [ -f "$dir" ]
+    then
+        dir=`dirname "$1"`
+        file=`basename "$1"`
+    fi
+
+    #Default to the current directory when no directory component remains.
+    if [ -z "$dir" ]
+    then
+        dir="."
+    fi
+
+    #Enter the directory to get its full path
+    pushd "$dir" >/dev/null
+    local path=$PWD
+    popd >/dev/null
+
+    #On windows further fixing is needed to get a windows path.
+    #NOTE(review): relies on $os_name being set by the sourcing script —
+    #confirm callers define it, otherwise both cases are skipped.
+    case "$os_name" in
+        CYGWIN)
+            path=`cygpath -m $path`
+            ;;
+        MSYS)
+            path=`echo $path| sed "s/^\/\([a-zA-Z]\)\//\1:\//"`
+            ;;
+    esac
+
+    #For directories $file is empty, so the result ends with a "/"
+    echo "$path/$file"
+}
+
+
+##@fn make_build_dir_name(path build_base_dir, string build_config_name)
+##@brief Create the location for the a build.
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@returns The generated path.
+##
+##This function will generate the name for a build directory. The generated name
+##follow the pattern "<build_base_dir>/build-<build_config_name>".
+##The generated path will be absolute.
+##
+##Usage:
+## Assuming CMakeList.txt file is in /foo/bar directory.
+## command | result
+## --------|-------
+## make_build_dir_name "/foo/bar" "test_build_st32" | Return /foo/bar/build-test_build_st32
+##
+#This is needed for doxygen for now.
+#!void make_build_dir_name(path build_base_dir, string build_config_name){};
+#
+function make_build_dir_name() {
+    local build_base_dir=$(get_full_path $1)
+    local build_config_name=$2
+    #get_full_path returns directories with a trailing "/", so no extra
+    #separator is needed before the "build-" prefix.
+    echo "${build_base_dir}build-$build_config_name"
+}
+
+##@fn generate_project(string src_dir, string build_base_dir, string build_config_name, string cmake_params)
+##@brief Execute CMake generation phase for a project
+##@param[in] src_dir
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@param[in] cmake_params
+##@returns N/A
+##
+##This function will create a build directory named "build-<build_config_name>"
+##under the passed <build_base_dir> directory, and execute CMake inside to
+##generate "Unix Makefiles".
+##CMake output is saved to <build_base_dir>/build-<build_config_name>/build.log
+##
+##Usage:
+## Assuming CMakeList.txt file is in /foo/bar directory.
+## command | result
+## --------|-------
+## generate_project "/foo/bar" "/tmp/build" "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug"| Generate makefiles under /tmp/buid/build-test_build_st32 for project /foo/bar/CMakeLists.txt
+##
+#This is needed for doxygen for now.
+#!void generate_project(string dir, string build_base_dir, string build_config_name, string cmake_params){};
+#
+function generate_project {
+    local src_dir=$1
+    local build_base_dir=$2
+    local bcfg_name=$3
+    local cm_params=$4
+    local bdir=$(make_build_dir_name "$build_base_dir" "$bcfg_name")
+    local error=0
+
+    #If build directory exists, clear it
+    if [ -e "$bdir" ]
+    then
+        rm -rf $bdir/*
+    else
+        #Create build directory
+        mkdir $bdir
+    fi
+    #Enter build directory
+    if pushd $bdir >/dev/null
+    then
+        #Start cmake to generate makefiles and start the build.
+        #NOTE(review): CMAKE_MAKE_PROGRAM=... lacks the -D prefix, so cmake
+        #will not treat it as a cache variable — confirm this is intended.
+        cmake -G"Unix Makefiles" CMAKE_MAKE_PROGRAM=$CMAKE_MAKE_PROGRAM $cm_params "$src_dir" 2>&1 | tee -a build.log
+        #Combine the exit codes of cmake and tee into a single error flag
+        error=$(( ${PIPESTATUS[0]} + ${PIPESTATUS[1]} ))
+        #Back to original location
+        popd >/dev/null
+    else
+        error=1
+    fi
+    return $error
+}
+
+##@fn build_project(string src_dir, string build_base_dir, string build_config_name, string cmake_params)
+##@brief Build a CMake project with gnumake.
+##@param[in] src_dir
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@param[in] cmake_params
+##@returns N/A
+##
+##This function will call \ref generate_project to generate makefiles with CMake
+##and will execute make to build the project.
+##Make output is saved to <dir>/build-<build_config_name>/build.log
+##
+##Usage:
+## Assuming CMakeList.txt file is in /foo/bar directory.
+## command | result
+## --------|-------
+## build_project "/foo/bar" "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug"| Generate makefiles under /foo/bar/build-test_build_st32 for project /foo/bar/CMakeLists.txt
+##
+#This is needed for doxygen for now.
+#!void build_project(string src_dir, string build_base_dir, string build_config_name, string cmake_params){};
+#
+function build_project {
+    local src_dir=$1
+    local build_base_dir=$2
+    local bcfg_name=$3
+    local cm_params=$4
+    local error=0
+
+    if generate_project "$src_dir" "$build_base_dir" "$bcfg_name" "$cm_params"
+    then
+        local bdir=$(make_build_dir_name "$build_base_dir" "$bcfg_name")
+        if pushd "$bdir" >/dev/null
+        then
+            cmake --build . -- -j VERBOSE=1 2>&1 | tee -a build.log
+            #Combine the exit codes of cmake and tee into a single flag
+            error=$(( ${PIPESTATUS[0]} + ${PIPESTATUS[1]} ))
+        fi
+        #Back to original location.
+        #NOTE(review): popd also runs when pushd above failed, popping an
+        #unrelated directory — confirm whether this is intended.
+        popd >/dev/null
+    else
+        error=1
+    fi
+    return $error
+}
+
+##@fn proj_dir_to_name(path proj_dir)
+##@brief Convert a project directory to project name
+##@param[in] proj_dir
+##@returns The converted name.
+##
+##This function will convert a project path to project name. Conversion rules:
+## * the leading "./" is removed
+## * all '/' (directory separator's) are replaced by '-'
+## * if the result is empty, the name "top_level" is used.
+##
+##project_list.
+##
+##Usage:
+## Assuming CMakeList.txt file is in /foo/bar directory.
+## command | result
+## --------|-------
+## project_list=(./ app secure_fw test ); proj_dir_to_name "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug" project_list | Build all projects listed in project_list array.
+##
+#This is needed for doxygen for now.
+#!void proj_dir_to_name(path proj_dir){};
+#
+function proj_dir_to_name {
+    local proj=$1
+    #Strip a leading "./" and turn remaining "/" separators into "-"
+    local name=$(echo "$proj" | sed 's/^\.\///;s/\//-/g')
+    #An empty result means the top level directory was passed
+    if [ -z "$name" ]
+    then
+        name="top_level"
+    fi
+    echo "$name"
+}
+
+##@fn build_proj_set(path build_base_dir, string build_config_name, string cmake_params, path project_list[])
+##@brief Build a CMake project with gnumake.
+##@param[in] build_base_dir
+##@param[in] build_config_name
+##@param[in] cmake_params
+##@param[in] project_list
+##@returns N/A
+##
+##This function will call \ref build_project for all CMake projects listed in
+##project_list.
+##
+##Usage:
+## Assuming CMakeList.txt file is in /foo/bar directory.
+## command | result
+## --------|-------
+## project_list=(./ app secure_fw test ); build_proj_set "test_build_st32" "-DCMAKE_BUILD_TYPE=Debug" project_list | Build all projects listed in project_list array.
+##
+#This is needed for doxygen for now.
+#!void build_proj_set(path build_base_dir, string build_config_name, string cmake_params, path project_list[]){};
+#
+function build_proj_set {
+    local build_base_dir=$1
+    local bcfg_name=$2
+    local cm_params=$3
+    #Take the project list by reference (nameref requires bash 4.3+)
+    local -n ref_project_list
+    ref_project_list=$4
+    local error=0
+    #For all projects in the list
+    for proj in "${ref_project_list[@]}"
+    do
+        #Convert the project location to a name.
+        local bcfg_name_ext="${bcfg_name}"_$(proj_dir_to_name "$proj")
+        #Convert project location to absolute path.
+        proj=$(get_full_path "$proj")
+        echo "build_project $proj $build_base_dir $bcfg_name_ext $cm_params"
+        #Build the project; remember failure but keep building the rest
+        build_project "$proj" "$build_base_dir" "$bcfg_name_ext" "$cm_params" || error=1
+    done
+    return $error
+}
diff --git a/virtualevn/requirements_tfm_python2.txt b/virtualevn/requirements_tfm_python2.txt
new file mode 100644
index 0000000..add452d
--- /dev/null
+++ b/virtualevn/requirements_tfm_python2.txt
@@ -0,0 +1,7 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+pygments==2.2.0
diff --git a/virtualevn/requirements_tfm_python3.txt b/virtualevn/requirements_tfm_python3.txt
new file mode 100644
index 0000000..bdcac54
--- /dev/null
+++ b/virtualevn/requirements_tfm_python3.txt
@@ -0,0 +1,11 @@
+#-------------------------------------------------------------------------------
+# Copyright (c) 2018-2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+#-------------------------------------------------------------------------------
+Jinja2==2.10
+MarkupSafe==1.0
+PyYAML==3.12
+pycryptodome==3.6.6
+pyasn1==0.1.9