Initial commit.

 - qa-tools public release which includes:
    - trace-based coverage tool
    - quality metrics measurement and tracking setup
    - associated in-source documentation.

Signed-off-by: Basil Eljuse <basil.eljuse@arm.com>
diff --git a/coverage-tool/coverage-reporting/branch_coverage.sh b/coverage-tool/coverage-reporting/branch_coverage.sh
new file mode 100755
index 0000000..3dc88f3
--- /dev/null
+++ b/coverage-tool/coverage-reporting/branch_coverage.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+
+##############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+##############################################################################
+
+#==============================================================================
+# FILE: branch_coverage.sh
+#
+# DESCRIPTION: Generates an intermediate layer json file and then
+# code coverage HTML reports using the open-source LCOV tool
+#==============================================================================
+
+set +x
+set -e
+
+ERROR_FILE=coverage_error.log
+
+###############################################################################
+# Prints error message to STDERR and log file.
+# Globals:
+# ERROR_FILE
+# Arguments:
+#   None
+# Outputs:
+#   Writes error to STDERR and log file with a timestamp
+###############################################################################
+err() {
+  echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $*" | tee -a ${ERROR_FILE} 1>&2
+}
+
+touch ${ERROR_FILE}
+if ! [ -x "$(command -v lcov)" ]; then
+  err 'Error: lcov is not installed. Install it with: sudo apt install lcov'
+  exit 1
+fi
+
+###############################################################################
+# Prints script usage.
+# Arguments:
+#   None
+# Outputs:
+#   Writes usage to stdout
+###############################################################################
+usage()
+{
+    # print the usage information
+    printf "Usage: $(basename $0) [options]\n"
+    printf "\t params:\n"
+    printf "\t --config Configuration json file. Required.\n"
+    printf "\t --workspace Local workspace folder where source codes reside. \
+            Required.\n"
+    printf "\t --json-path Intermediate json file name. Optional defaults to \
+            'output_file.json'\n"
+    printf "\t --outdir Report folder. Optional defaults to 'out'\n"
+    printf "\t -h|--help Display usage\n"
+    printf "Example of usage:\n"
+    printf "./branch_coverage.sh --config config_file.json \
+            --workspace /server_side/source/ --outdir html_report\n"
+    exit 1
+}
+
+# default values
+JSON_PATH=output_file.json
+OUTDIR=out
+
+###############################################################################
+# Parse arguments.
+# Globals:
+# CONFIG_JSON
+# LOCAL_WORKSPACE
+# JSON_PATH
+# OUTDIR
+# Arguments:
+#   Command line arguments
+# Outputs:
+#   Writes usage to stdout
+###############################################################################
+parse_arguments()
+{
+  while [ $# -gt 0 ]
+  do
+    key="$1"
+    case $key in
+      --config)
+        CONFIG_JSON="$2"
+        shift
+      ;;
+      --workspace)
+        LOCAL_WORKSPACE="$2"
+        shift
+      ;;
+      --json-path)
+        JSON_PATH="$2"
+        shift
+      ;;
+      --outdir)
+        OUTDIR="$2"
+        shift
+      ;;
+      -h|--help)
+        usage
+      ;;
+      *)
+        printf "Unknown argument $key\n"
+        usage
+      ;;
+    esac
+    shift
+  done
+}
+
+
+parse_arguments "$@"
+
+if [ -z "$LOCAL_WORKSPACE" ] || [ -z "$CONFIG_JSON" ]; then
+    usage
+fi
+
+if [ ! -d "$LOCAL_WORKSPACE" ]; then
+    err "$LOCAL_WORKSPACE doesn't exist\n"
+    exit 1
+fi
+
+if [ ! -f "$CONFIG_JSON" ]; then
+    err "$CONFIG_JSON doesn't exist\n"
+    exit 1
+fi
+
+clear
+echo "Generating intermediate layer file '$JSON_PATH'..."
+python3 intermediate_layer.py --config-json "$CONFIG_JSON" --local-workspace "$LOCAL_WORKSPACE"
+echo "Converting intermediate layer file to info file..."
+python3 generate_info_file.py --workspace "$LOCAL_WORKSPACE" --json "$JSON_PATH"
+echo "Generating LCOV report at '$OUTDIR'..."
+genhtml --branch-coverage coverage.info --output-directory "$OUTDIR"
+mv coverage.info "$OUTDIR"/coverage.info
+mv error_log.txt "$OUTDIR"/error_log.txt
diff --git a/coverage-tool/coverage-reporting/clone_sources.py b/coverage-tool/coverage-reporting/clone_sources.py
new file mode 100644
index 0000000..fb1807d
--- /dev/null
+++ b/coverage-tool/coverage-reporting/clone_sources.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+###############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+###############################################################################
+
+###############################################################################
+# FILE: clone_sources.py
+#
+# DESCRIPTION: Clone the source files for code coverage
+###############################################################################
+
+import os
+import subprocess
+import json
+import time
+from random import random
+
+
+def call_cmd(cmd, print_cmd=False):
+    """
+    Executes an OS command and returns its output as a string
+
+    :param cmd: OS command as string
+    :param print_cmd: Optional argument to print the command in stdout
+    :return: The string output of the os command
+    """
+    if print_cmd:
+        print("+" + cmd)
+    # Decode so callers can compare the output against strings
+    out = subprocess.check_output(cmd, shell=True)
+    return out.decode("utf-8")
+
+
+def skip_source(output_dir, source, handler=None):
+    """
+    Handles a source whose destination folder may already exist
+
+    :param output_dir: Folder where to put the source files and folders
+    :param source: Dictionary with the source's information
+    :param handler: Optional function that decides whether to skip
+    :return: True if cloning the given source must be skipped,
+             False otherwise
+    """
+    location = os.path.join(output_dir, source['LOCATION'])
+    # Check if exists and have files
+    if os.path.isdir(location):
+        if not os.listdir(location):
+            if handler is not None:
+                return handler(source, "Directory exists and is empty")
+            else:
+                # By default send a warning and overwrite it
+                print(("WARNING!: Directory {} already exists and is "
+                       "empty. Overwriting it...'").format(location))
+                os.rmdir(location)
+                return False
+        commit_id = call_cmd(("cd {} && git log -1 2>/dev/null | "
+                              "grep commit | awk '{{print $2}}'").format(
+                              location), print_cmd=True).strip()
+        if source['type'] == "git":
+            if commit_id == "":
+                # is not a git
+                if handler is not None:
+                    return handler(source, "Directory exists and is not git")
+                else:
+                    print(("WARNING!: Directory {} already exists and is not a"
+                           " git repo: '{}'").format(location, source['URL']))
+            elif commit_id != source["COMMIT"].strip():
+                # there are mismatching commit id's
+                if handler is not None:
+                    return handler(source, "Mismatch in gits")
+                else:
+                    print(("WARNING!: Mismatch in git repo {}\nExpected {}, "
+                           "Cloned {}").format(source['URL'], source['COMMIT'],
+                                               commit_id))
+        elif source['type'] == "http":
+            if handler is not None:
+                return handler(source,
+                               "WARNING!: Directory already exists")
+            else:
+                print("WARNING!: Directory {} already exists".format(
+                    location))
+        return True
+    return False
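+
+# Example (illustrative) of a custom overwrite handler that can be passed
+# to skip_source()/clone_repo(); it logs the reason and skips the source:
+#
+#   def skip_and_warn(source, reason):
+#       print("WARNING!: {}: {}".format(source['LOCATION'], reason))
+#       return True  # True means: skip cloning this source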
+
+
+class CloneSources(object):
+    """Class used to clone the source code needed to produce code coverage
+    reports.
+    """
+    def __init__(self, json_file):
+        self.json_file = json_file
+        self.json_data = None
+        self.load_json()
+
+    def load_json(self):
+        with open(self.json_file, "r") as json_file:
+            self.json_data = json.load(json_file)
+
+    def clone_repo(self, output_dir, overwrite_handler=None):
+        """
+        Clones or reproduces a folder with source code based on the
+        configuration in the json file
+
+        :param output_dir: Where to put the source files
+        :param overwrite_handler: Optional function to handle overwrites
+        """
+        if self.json_data is None:
+            self.load_json()
+        sources = []
+        if 'parameters' in self.json_data:
+            sources = self.json_data['parameters']['sources']
+        elif 'configuration' in self.json_data:
+            sources = self.json_data['configuration']['sources']
+        else:
+            raise Exception("Invalid json format for sources!")
+
+        for source in sources:
+            if skip_source(output_dir, source, overwrite_handler):
+                continue
+            if source['type'] == "git":
+                git = source
+                url = git["URL"]
+                commit_id = git["COMMIT"]
+                output_loc = os.path.join(output_dir, git["LOCATION"])
+                cmd = "git clone {} {}".format(url, output_loc)
+                call_cmd(cmd)
+                if git['REFSPEC']:
+                    call_cmd("cd {};git fetch -q origin {}".format(
+                        output_loc, git['REFSPEC']))
+                if commit_id:
+                    call_cmd("cd {};git checkout -q {}".format(
+                        output_loc, commit_id))
+                else:
+                    call_cmd("cd {};git checkout -q FETCH_HEAD".format(
+                        output_loc))
+            elif source['type'] == 'http':
+                site = source
+                output_loc = os.path.join(output_dir, site["LOCATION"])
+                tmp_folder = os.path.join(output_dir,
+                                          "_tmp_{}_{}".format(time.time(),
+                                                              random()))
+                call_cmd("mkdir -p {}".format(tmp_folder))
+                call_cmd("wget -q {} -P {}".format(
+                    site['URL'], tmp_folder))
+                call_cmd("mkdir -p {}".format(output_loc))
+                if site['COMPRESSION'] == "xz":
+                    # -J selects xz decompression (-z would be gzip)
+                    call_cmd("cd {};tar -xJf $(basename {}) -C {}".format(
+                        tmp_folder, site['URL'], output_loc))
+                call_cmd("rm -rf {}".format(tmp_folder))
diff --git a/coverage-tool/coverage-reporting/generate_info_file.py b/coverage-tool/coverage-reporting/generate_info_file.py
new file mode 100755
index 0000000..0c0f39a
--- /dev/null
+++ b/coverage-tool/coverage-reporting/generate_info_file.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python
+##############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+##############################################################################
+
+import os
+import sys
+import json
+import re
+import argparse
+
+
+def function_coverage(function_tuples, info_file):
+    """
+    Parses function coverage data from the intermediate json file and
+    writes it to the info file
+
+    :param function_tuples: List of tuples with function name
+                            and its data as pairs.
+    :param info_file: File handle for writing coverage data
+    """
+    total_func = 0
+    covered_func = 0
+    function_names = []
+    function_cov = []
+    for func_name, func_data in function_tuples:
+        function_names.append(
+            'FN:{},{}\n'.format(
+                func_data["line_number"],
+                func_name))
+        total_func += 1
+        if func_data["covered"]:
+            covered_func += 1
+            function_cov.append('FNDA:1,{}\n'.format(func_name))
+        else:
+            function_cov.append('FNDA:0,{}\n'.format(func_name))
+    info_file.write("\n".join(function_names))
+    info_file.write("\n".join(function_cov))
+    info_file.write('FNF:{}\n'.format(total_func))
+    info_file.write('FNH:{}\n'.format(covered_func))
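+
+# The records above follow the LCOV tracefile function format:
+#   FN:<line number of function start>,<function name>
+#   FNDA:<execution count>,<function name>
+#   FNF:<number of functions found>
+#   FNH:<number of functions hit>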
+
+
+def line_coverage(lines_dict, info_file):
+    """
+    Parses line coverage data from the intermediate json file and
+    writes it to the info file
+
+    :param lines_dict: Dictionary of lines with line number as key
+                       and its data as value
+    :param info_file: File handle for writing coverage data
+    """
+    total_lines = 0
+    covered_lines = 0
+    for line in lines_dict:
+        total_lines += 1
+        if lines_dict[line]['covered']:
+            covered_lines += 1
+            info_file.write('DA:' + line + ',1\n')
+        else:
+            info_file.write('DA:' + line + ',0\n')
+    info_file.write('LF:' + str(total_lines) + '\n')
+    info_file.write('LH:' + str(covered_lines) + '\n')
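+
+# The records above follow the LCOV tracefile line format:
+#   DA:<line number>,<execution count>
+#   LF:<number of instrumented lines>
+#   LH:<number of lines hit>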
+
+
+def sanity_check(branch_line, lines_dict, abs_path_file):
+    """
+    Check if the 'branch_line' line of the C source corresponds to actual
+    branching instructions in the assembly code. Also, check if that
+    line is covered; if it's not covered, this branching statement can
+    be omitted from the report.
+    Returns False if the line is not present in the coverage dictionary;
+    otherwise returns True, logging a warning to the error log when no
+    matching branching instruction is found in the assembly code.
+
+    :param branch_line: Source code line with the branch instruction
+    :param lines_dict: Dictionary of lines with line number as key
+                       and its data as value
+    :param abs_path_file: File name of the source file
+    """
+    if str(branch_line) not in lines_dict:
+        return False
+    found_branching = False
+    for i in lines_dict[str(branch_line)]['elf_index']:
+        for j in lines_dict[str(branch_line)]['elf_index'][i]:
+            string = lines_dict[str(branch_line)]['elf_index'][i][j][0]
+            # these cover all the possible branching instructions
+            if ('\tb' in string or
+                '\tcbnz' in string or
+                '\tcbz' in string or
+                '\ttbnz' in string or
+                    '\ttbz' in string):
+                # '\tbl' in string or  # already covered by '\tb'
+                # '\tblr' in string or  # already covered by '\tb'
+                # '\tbr' in string or  # already covered by '\tb'
+                found_branching = True
+    if not found_branching:
+        error_log.write(
+            '\nSomething possibly wrong:\n\tFile ' +
+            abs_path_file +
+            ', line ' +
+            str(branch_line) +
+            '\n\tshould be a branching statement but couldn\'t ' +
+            'find correspondence in assembly code')
+    return True
+
+
+def manage_if_branching(branch_line, lines_dict, info_file, abs_path_file):
+    """
+    Takes care of branch coverage for 'if' statements. 'branch_line' is
+    the source code line in which the 'if' statement is located; the
+    function produces branch coverage info based on the C source code
+    and the json file content.
+
+    :param branch_line: Source code line with the 'if' instruction
+    :param lines_dict: Dictionary of lines with line number as key
+                       and its data as value
+    :param info_file: File handle for writing coverage data
+    :param abs_path_file: File name of the source file
+    """
+    total_branch_local = 0
+    covered_branch_local = 0
+
+    if not sanity_check(branch_line, lines_dict, abs_path_file):
+        return total_branch_local, covered_branch_local
+    total_branch_local += 2
+    current_line = branch_line  # used to read lines one by one
+    # check for multiline if-condition and update current_line accordingly
+    parenthesis_count = 0
+    while True:
+        end_of_condition = False
+        for char in lines[current_line]:
+            if char == ')':
+                parenthesis_count -= 1
+                if parenthesis_count == 0:
+                    end_of_condition = True
+            elif char == '(':
+                parenthesis_count += 1
+        if end_of_condition:
+            break
+        current_line += 1
+    # first branch
+    # simple case: 'if' statements with no braces
+    if '{' not in lines[current_line] and '{' not in lines[current_line + 1]:
+
+        if (str(current_line + 1) in lines_dict and
+                lines_dict[str(current_line + 1)]['covered']):
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '0,' + '1\n')
+            covered_branch_local += 1
+        else:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '0,' + '0\n')
+        current_line += 1
+
+    # more complex case: '{' after the 'if' statement
+    else:
+        if '{' in lines[current_line]:
+            current_line += 1
+        else:
+            current_line += 2
+
+        # we need to check whether at least one line in the block is covered
+        found_covered_line = False
+
+        # this is a simpler version of a stack used to check when a code block
+        # ends at the moment, it just checks for '{' and '}', doesn't take into
+        # account the presence of commented braces
+        brace_counter = 1
+        while True:
+            end_of_block = False
+            for char in lines[current_line]:
+                if char == '}':
+                    brace_counter -= 1
+                    if brace_counter == 0:
+                        end_of_block = True
+                elif char == '{':
+                    brace_counter += 1
+            if end_of_block:
+                break
+            if (str(current_line) in lines_dict and
+                    lines_dict[str(current_line)]['covered']):
+                found_covered_line = True
+
+            current_line += 1
+
+        if found_covered_line:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '0,' + '1\n')
+            covered_branch_local += 1
+        else:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '0,' + '0\n')
+
+    # second branch (if present). If not present, second branch is covered by
+    # default
+    current_line -= 1
+    candidate_else_line = current_line
+    while 'else' not in lines[current_line] and candidate_else_line + \
+            2 >= current_line:
+        current_line += 1
+        if current_line == len(lines):
+            break
+
+    # no 'else': branch covered by default
+    if current_line == candidate_else_line + 3:
+        info_file.write('BRDA:' + str(branch_line) + ',0,' + '1,' + '1\n')
+        covered_branch_local += 1
+        return total_branch_local, covered_branch_local
+
+    # 'else' found: check if opening braces are present
+    if '{' not in lines[current_line - 1] and '{' not in lines[current_line]:
+        if str(current_line + 1) in lines_dict:
+            if lines_dict[str(current_line + 1)]['covered']:
+                info_file.write(
+                    'BRDA:' +
+                    str(branch_line) +
+                    ',0,' +
+                    '1,' +
+                    '1\n')
+                covered_branch_local += 1
+            else:
+                info_file.write(
+                    'BRDA:' +
+                    str(branch_line) +
+                    ',0,' +
+                    '1,' +
+                    '0\n')
+        else:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '1,' + '0\n')
+
+    else:
+        if '{' in lines[current_line]:
+            current_line += 1
+        else:
+            current_line += 2
+        found_covered_line = False
+        while '}' not in lines[current_line]:
+            if (str(current_line) in lines_dict and
+                    lines_dict[str(current_line)]['covered']):
+                found_covered_line = True
+                break
+            current_line += 1
+        if found_covered_line:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '1,' + '1\n')
+            covered_branch_local += 1
+        else:
+            info_file.write('BRDA:' + str(branch_line) + ',0,' + '1,' + '0\n')
+
+    return total_branch_local, covered_branch_local
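+
+# The BRDA records written by the branch handlers follow the LCOV
+# tracefile branch format:
+#   BRDA:<line number>,<block number>,<branch number>,<taken>
+# e.g. 'BRDA:42,0,1,0' marks the second branch of the statement at
+# line 42 as not taken.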
+
+
+def manage_switch_branching(switch_line, lines_dict, info_file, abs_path_file):
+    """
+    Takes care of branch coverage for 'switch' statements. 'switch_line'
+    is the source code line in which the 'switch' statement is located;
+    the function produces branch coverage info based on the C source code
+    and the json file content.
+
+    :param switch_line: Source code line with the 'switch' instruction
+    :param lines_dict: Dictionary of lines with line number as key
+                       and its data as value
+    :param info_file: File handle for writing coverage data
+    :param abs_path_file: File name of the source file
+    """
+
+    total_branch_local = 0
+    covered_branch_local = 0
+
+    if not sanity_check(switch_line, lines_dict, abs_path_file):
+        return total_branch_local, covered_branch_local
+
+    current_line = switch_line  # used to read lines one by one
+    branch_counter = 0          # used to count the number of switch branches
+    brace_counter = 0
+
+    # parse the switch-case line by line, checking if every 'case' is covered
+    # the switch-case ends with a '}'
+    while True:
+        if '{' in lines[current_line]:
+            brace_counter += 1
+        if '}' in lines[current_line]:
+            brace_counter -= 1
+        if brace_counter == 0:
+            return total_branch_local, covered_branch_local
+        if 'case' in lines[current_line] or 'default' in lines[current_line]:
+            covered = False
+            total_branch_local += 1
+            inner_brace = 0
+            current_line += 1
+            while (('case' not in lines[current_line]
+                   and 'default' not in lines[current_line]) or
+                   inner_brace > 0):
+                if (str(current_line) in lines_dict and
+                        lines_dict[str(current_line)]['covered']):
+                    covered = True
+                if '{' in lines[current_line]:
+                    inner_brace += 1
+                    brace_counter += 1
+                if '}' in lines[current_line]:
+                    inner_brace -= 1
+                    brace_counter -= 1
+                if brace_counter == 0:
+                    break
+                current_line += 1
+            if covered:
+                info_file.write(
+                    'BRDA:' +
+                    str(switch_line) +
+                    ',0,' +
+                    str(branch_counter) +
+                    ',1\n')
+                covered_branch_local += 1
+            else:
+                info_file.write(
+                    'BRDA:' +
+                    str(switch_line) +
+                    ',0,' +
+                    str(branch_counter) +
+                    ',0\n')
+            if brace_counter == 0:
+                return total_branch_local, covered_branch_local
+            branch_counter += 1
+        else:
+            current_line += 1
+
+    return total_branch_local, covered_branch_local
+
+
+def branch_coverage(abs_path_file, info_file, lines_dict):
+    """
+    Produces branch coverage information, using the functions
+    'manage_if_branching' and 'manage_switch_branching'
+
+    :param abs_path_file: File name of the source file
+    :param info_file: File handle for writing coverage data
+    :param lines_dict: Dictionary of lines with line number as key
+                       and its data as value
+    """
+    total_branch = 0
+    covered_branch = 0
+
+    # branch coverage: if statements
+    branching_lines = []
+
+    # regex: find all the lines starting with 'if' or 'else if'
+    # (preceded by whitespace/tabs)
+    pattern = re.compile(r"^\s+if|^\s+} else if|^\s+else if")
+    for i, line in enumerate(open(abs_path_file)):
+        for match in re.finditer(pattern, line):
+            branching_lines.append(i + 1)
+    while branching_lines:
+        t = manage_if_branching(branching_lines.pop(0), lines_dict,
+                                info_file, abs_path_file)
+        total_branch += t[0]
+        covered_branch += t[1]
+
+    # branch coverage: switch statements
+    switch_lines = []
+
+    # regex: find all the lines starting with 'switch'
+    # (preceded by whitespace/tabs)
+    pattern = re.compile(r"^\s+switch")
+    for i, line in enumerate(open(abs_path_file)):
+        for match in re.finditer(pattern, line):
+            switch_lines.append(i + 1)
+    while switch_lines:
+        t = manage_switch_branching(switch_lines.pop(0), lines_dict,
+                                    info_file, abs_path_file)
+        total_branch += t[0]
+        covered_branch += t[1]
+
+    info_file.write('BRF:' + str(total_branch) + '\n')
+    info_file.write('BRH:' + str(covered_branch) + '\n')
+
+
+parser = argparse.ArgumentParser(
+    description="Script to convert intermediate json file to LCOV info file")
+parser.add_argument('--workspace', metavar='PATH',
+                    help='Folder with source files structure',
+                    required=True)
+parser.add_argument('--json', metavar='PATH',
+                    help='Intermediate json file name',
+                    required=True)
+parser.add_argument('--info', metavar='PATH',
+                    help='Output info file name',
+                    default="coverage.info")
+args = parser.parse_args()
+with open(args.json) as json_file:
+    json_data = json.load(json_file)
+info_file = open(args.info, "w+")
+error_log = open("error_log.txt", "w+")
+file_list = json_data['source_files'].keys()
+
+for relative_path in file_list:
+    abs_path_file = os.path.join(args.workspace, relative_path)
+    if not os.path.exists(abs_path_file):
+        continue
+    source = open(abs_path_file)
+    lines = source.readlines()
+    info_file.write('TN:\n')
+    info_file.write('SF:' + os.path.abspath(abs_path_file) + '\n')
+    lines = [-1] + lines  # shift indexes so they match 1-based line numbers
+    function_coverage(
+        json_data['source_files'][relative_path]['functions'].items(),
+        info_file)
+    branch_coverage(abs_path_file, info_file,
+                    json_data['source_files'][relative_path]['lines'])
+    line_coverage(json_data['source_files'][relative_path]['lines'],
+                  info_file)
+    info_file.write('end_of_record\n\n')
+    source.close()
+
+info_file.close()
+error_log.close()
diff --git a/coverage-tool/coverage-reporting/intermediate_layer.py b/coverage-tool/coverage-reporting/intermediate_layer.py
new file mode 100644
index 0000000..794c7a4
--- /dev/null
+++ b/coverage-tool/coverage-reporting/intermediate_layer.py
@@ -0,0 +1,647 @@
+#!/usr/bin/env python
+###############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+###############################################################################
+
+###############################################################################
+# FILE: intermediate_layer.py
+#
+# DESCRIPTION: Creates an intermediate json file with information provided
+#              by the configuration json file, dwarf signatures and trace
+#              files.
+#
+###############################################################################
+
+import os
+import re
+import glob
+import argparse
+import subprocess
+import json
+from argparse import RawTextHelpFormatter
+import logging
+import time
+
+__version__ = "6.0"
+
+# Static map that defines the elf file source type in the intermediate json
+ELF_MAP = {
+    "bl1": 0,
+    "bl2": 1,
+    "bl31": 2,
+    "bl32": 3,
+    "scp_ram": 10,
+    "scp_rom": 11,
+    "mcp_rom": 12,
+    "mcp_ram": 13,
+    "custom_offset": 100
+}
+
+
+def os_command(command, show_command=False):
+    """
+    Executes an OS command; raises an exception if the command fails
+
+    :param command: OS command as string
+    :param show_command: Optional argument to print the command in stdout
+    :return: The string output of the os command
+    """
+    out = ""
+    try:
+        if show_command:
+            print("OS command: {}".format(command))
+        out = subprocess.check_output(
+            command, stderr=subprocess.STDOUT, shell=True)
+    except subprocess.CalledProcessError as ex:
+        raise Exception(
+            "Exception running command '{}': {}({})".format(
+                command, ex.output, ex.returncode))
+    return out.decode("utf8")
+
+
+def load_stats_from_traces(trace_globs):
+    """
+    Function to process and consolidate statistics from trace files
+
+    :param trace_globs: List of trace file patterns
+    :return: Dictionary with stats from trace files i.e.
+        {mem address in decimal}=(times executed, inst size)
+    """
+    stats = {}
+    stat_size = {}
+
+    # Make a list of unique trace files
+    trace_files = []
+    for tg in trace_globs:
+        trace_files.extend(glob.glob(tg))
+    trace_files = set(trace_files)
+
+    if not trace_files:
+        raise Exception("No trace files found for '{}'".format(trace_globs))
+    # Load stats from the trace files
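+    # Each trace line holds three whitespace-separated fields:
+    # '<hex address> <times executed> <instruction size>'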
+    for trace_file in trace_files:
+        try:
+            with open(trace_file, 'r') as f:
+                for line in f:
+                    data = line.split()
+                    address = int(data[0], 16)
+                    stat = int(data[1])
+                    size = int(data[2])
+                    stat_size[address] = size
+                    if address in stats:
+                        stats[address] += stat
+                    else:
+                        stats[address] = stat
+        except Exception as ex:
+            logger.error("@Loading stats from trace files:{}".format(ex))
+    # Merge the two dicts
+    for address in stats:
+        stats[address] = (stats[address], stat_size[address])
+    return stats
+
+
+def get_code_sections_for_binary(elf_name):
+    """
+    Function to return the ranges of memory address for sections of code
+    in the elf file
+
+    :param elf_name: Elf binary file name
+    :return: List of code sections tuples, i.e. (section type, initial
+            address, end address)
+    """
+    command = """%s -h %s | grep -B 1 CODE | grep -v CODE \
+                | awk '{print $2" "$4" "$3}'""" % (OBJDUMP, elf_name)
+    text_out = os_command(command)
+    sections = text_out.split('\n')
+    sections.pop()
+    secs = []
+    for sec in sections:
+        try:
+            d = sec.split()
+            secs.append((d[0], int(d[1], 16), int(d[2], 16)))
+        except Exception as ex:
+            logger.error(
+                "@Returning memory address code sections: {}".format(ex))
+    return secs
+
+
+def get_executable_ranges_for_binary(elf_name):
+    """
+    Get function ranges from an elf file
+
+    :param elf_name: Elf binary file name
+    :return: List of tuples for ranges i.e. (range start, range end)
+    """
+    # Parse all $x / $d symbols
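+    # (ARM ELF mapping symbols: $x/$a/$t mark the start of code
+    # sequences, $d marks the start of data)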
+    symbol_table = []
+    command = r"""%s -s %s | awk '/\$[xatd]/ {print $2" "$8}'""" % (
+        READELF, elf_name)
+    text_out = os_command(command)
+    lines = text_out.split('\n')
+    lines.pop()
+    for line in lines:
+        try:
+            data = line.split()
+            address = int(data[0], 16)
+            _type = 'X' if data[1] in ['$x', '$t', '$a'] else 'D'
+            symbol_table.append((address, _type))
+        except Exception as ex:
+            logger.error("@Getting executable ranges: {}".format(ex))
+
+    # Add markers for end of code sections
+    sections = get_code_sections_for_binary(elf_name)
+    for sec in sections:
+        symbol_table.append((sec[1] + sec[2], 'S'))
+
+    # Sort by address
+    symbol_table = sorted(symbol_table, key=lambda tup: tup[0])
+
+    # Create ranges (list of START/END tuples)
+    ranges = []
+    range_start = symbol_table[0][0]
+    rtype = symbol_table[0][1]
+    for sym in symbol_table:
+        if sym[1] != rtype:
+            if rtype == 'X':
+                # Subtract one because the first address of the
+                # next range belongs to the next range.
+                ranges.append((range_start, sym[0] - 1))
+            range_start = sym[0]
+            rtype = sym[1]
+    return ranges
+
+
+def list_of_functions_for_binary(elf_name):
+    """
+    Get a dictionary of the functions in the elf file
+
+    :param elf_name: Elf binary file name
+    :return: Dictionary keyed by function name with the function's start
+             address, end address and whether it has a dwarf signature
+             (sources)
+    """
+    _functions = {}
+    command = "%s -t %s | awk 'NR>4' | sed /^$/d" % (OBJDUMP, elf_name)
+    symbols_output = os_command(command)
+    rex = r'([0-9a-fA-F]+) (.{7}) ([^ ]+)[ \t]([0-9a-fA-F]+) (.*)'
+    symbols = symbols_output.split('\n')[:-1]
+    for sym in symbols:
+        try:
+            symbol_details = re.findall(rex, sym)
+            symbol_details = symbol_details[0]
+            if 'F' not in symbol_details[1]:
+                continue
+            function_name = symbol_details[4]
+            # We don't want the .hidden for hidden functions
+            if function_name.startswith('.hidden '):
+                function_name = function_name[len('.hidden '):]
+            if function_name not in _functions:
+                _functions[function_name] = {'start': symbol_details[0],
+                                             'end': symbol_details[3],
+                                             'sources': False}
+            else:
+                logger.warning("'{}' duplicated in '{}'".format(
+                    function_name,
+                    elf_name))
+        except Exception as ex:
+            logger.error("@Listing functions at file {}: {}".format(
+                elf_name,
+                ex))
+    return _functions
+
+
+def apply_functions_exclude(elf_config, functions):
+    """
+    Remove excluded functions from the list of functions
+
+    :param elf_config: Config for elf binary file
+    :param functions: Array of functions in the binary elf file
+    :return: Tuple with included and excluded functions
+    """
+    if 'exclude_functions' not in elf_config:
+        return functions, []
+    incl = {}
+    excl = {}
+    for fname in functions:
+        exclude = False
+        for rex in elf_config['exclude_functions']:
+            if re.match(rex, fname):
+                exclude = True
+                excl[fname] = functions[fname]
+                break
+        if not exclude:
+            incl[fname] = functions[fname]
+    return incl, excl
+
+
+def remove_workspace(path, workspace):
+    """
+    Get the given path relative to the workspace
+
+    :param path: Path to be made relative to the workspace
+    :param workspace: Workspace path to remove as prefix
+    """
+    return path if workspace is None else os.path.relpath(path, workspace)
+
+
+def get_function_line_numbers(source_file):
+    """
+    Using ctags get all the function names with their line numbers
+    within the source_file
+
+    :param source_file: Source file to be parsed by ctags
+    :return: Dictionary with function name as key and line number as value
+    """
+    function_lines = os_command(
+        "ctags -x --c-kinds=f {}".format(source_file)).split("\n")
+    fln = {}
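+    # 'ctags -x' prints one cross-reference line per tag:
+    #   <name> <kind> <line number> <file> <source text>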
+    try:
+        for line in function_lines:
+            cols = line.split()
+            if len(cols) < 3:
+                continue
+            if cols[1] == "function":
+                fln[cols[0]] = int(cols[2])
+            elif cols[1] == "label" and cols[0] == "func":
+                fln[cols[-1]] = int(cols[2])
+    except BaseException:
+        logger.warning("Warning: Can't get all function line numbers from %s" %
+                       source_file)
+    return fln
+
+
+class FunctionLineNumbers(object):
+
+    def __init__(self, workspace):
+        self.filenames = {}
+        self.workspace = workspace
+
+    def get_line_number(self, filename, function_name):
+        if not FUNCTION_LINES_ENABLED:
+            return 0
+        if filename not in self.filenames:
+            newp = os.path.join(self.workspace, filename)
+            self.filenames[filename] = get_function_line_numbers(newp)
+        return 0 if function_name not in self.filenames[filename] else \
+            self.filenames[filename][function_name]
+
+
+class PostProcessCC(object):
+    """Class used to process the trace data along with the dwarf
+    signature files to produce an intermediate layer in json with
+    code coverage in assembly and c source code.
+    """
+
+    def __init__(self, _config, local_workspace):
+        self._data = {}
+        self.config = _config
+        self.local_workspace = local_workspace
+        self.elfs = self.config['elfs']
+        # Dictionary with stats from trace files {address}=(times executed,
+        # inst size)
+        self.traces_stats = {}
+        # Dictionary of unique assembly line memory address against source
+        # file location
+        # {assembly address} = (opcode, source file location, line number in
+        # the source file, times executed)
+        self.asm_lines = {}
+        # Dictionary of {source file location}=>{'lines': {'covered':Boolean,
+        # 'elf_index': {elf index}=>{assembly address}=>(opcode,
+        # times executed)},
+        # 'functions': {function name}=>is covered(boolean)}
+        self.source_files_coverage = {}
+        self.functions = []
+        # Unique set of elf list of files
+        self.elf_map = {}
+        # For elf custom mappings
+        self.elf_custom = None
+
+    def process(self):
+        """
+        Public method to process the trace files and dwarf signatures
+        using the information contained in the json configuration file.
+        This method writes the intermediate json file output linking
+        the trace data and c source and assembly code.
+        """
+        self.source_files_coverage = {}
+        self.asm_lines = {}
+        # Initialize for unknown elf files
+        self.elf_custom = ELF_MAP["custom_offset"]
+        sources_config = {}
+        print("Generating intermediate json layer '{}'...".format(
+            self.config['parameters']['output_file']))
+        for elf in self.elfs:
+            # Gather information
+            elf_name = elf['name']
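+            # Cheap existence check: os_command raises if the elf file
+            # is missing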
+            os_command("ls {}".format(elf_name))
+            # Trace data
+            self.traces_stats = load_stats_from_traces(elf['traces'])
+            prefix = self.config['parameters']['workspace'] \
+                if self.config['configuration']['remove_workspace'] else \
+                None
+            functions_list = list_of_functions_for_binary(elf_name)
+            (functions_list, excluded_functions) = apply_functions_exclude(
+                elf, functions_list)
+            # Produce code coverage
+            self.dump_sources(elf_name, functions_list, prefix)
+            sources_config = self.config['parameters']['sources']
+            # Now check code coverage in the functions with no dwarf signature
+            # (sources)
+            nf = {f: functions_list[f] for f in
+                  functions_list if not
+                  functions_list[f]["sources"]}
+            self.process_fn_no_sources(nf)
+            # Write to the intermediate json file
+        data = {"source_files": self.source_files_coverage,
+                "configuration": {
+                    "sources": sources_config,
+                    "metadata": "" if 'metadata' not in
+                                      self.config['parameters'] else
+                    self.config['parameters']['metadata'],
+                    "elf_map": self.elf_map
+                }
+                }
+        json_data = json.dumps(data, indent=4, sort_keys=True)
+        with open(self.config['parameters']['output_file'], "w") as f:
+            f.write(json_data)
+
+    def dump_sources(self, elf_filename, function_list, prefix=None):
+        """
+        Process an elf file i.e. match the source and asm lines against trace
+            files (coverage).
+
+        :param elf_filename: Elf binary file name
+        :param function_list: List of functions in the elf file i.e.
+                                [(address start, address end, function name)]
+        :param prefix: Optional path name to be removed at the start of source
+                        file locations
+        """
+        command = "%s -Sl %s" % (OBJDUMP, elf_filename)
+        dump = os_command(command)
+        dump += "\n"  # For pattern matching the last \n
+        elf_name = os.path.splitext(os.path.basename(elf_filename))[0]
+        # Object that handles the function line numbers in
+        # their filename
+        function_line_numbers = FunctionLineNumbers(self.local_workspace)
+        # To map the elf filename against an index
+        if elf_name not in self.elf_map:
+            if elf_name in ELF_MAP:
+                self.elf_map[elf_name] = ELF_MAP[elf_name]
+            else:
+                self.elf_map[elf_name] = self.elf_custom
+                self.elf_custom += 1
+        elf_index = self.elf_map[elf_name]
+        # The function groups have 2 elements:
+        # Function's block name, Function's block code
+        function_groups = re.findall(
+            r"(?s)[0-9a-fA-F]+ <([a-zA-Z0-9_]+)>:\n(.+?)(?:\r*\n\n|\n$)",
+            dump, re.DOTALL | re.MULTILINE)
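+        # A block in the objdump -Sl dump looks roughly like this
+        # (illustrative, not verbatim tool output):
+        #   0000000000001000 <foo>:
+        #   foo():
+        #   /workspace/src/foo.c:42
+        #       1000:  d503201f    nop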
+        # Pointer to files dictionary
+        source_files = self.source_files_coverage
+        for function_group in function_groups:
+            if len(function_group) != 2:
+                continue
+            block_function_name, block_code = function_group
+            block_code += "\n"
+            # Find if the function has C source code filename
+            function_signature_group = re.findall(
+                r"(?s){}\(\):\n(/.+?):[0-9]+.*(?:\r*\n\n|\n$)".format(
+                    block_function_name), block_code, re.DOTALL | re.MULTILINE)
+            if not function_signature_group:
+                continue  # Function does not have dwarf signature (sources)
+            function_list[block_function_name]["sources"] = True
+            block_function_source_file = remove_workspace(
+                function_signature_group[0], prefix)
+            fn_line_number = function_line_numbers.get_line_number(
+                block_function_source_file, block_function_name)
+            if block_function_source_file not in source_files:
+                source_files[block_function_source_file] = {"functions": {},
+                                                            "lines": {}}
+            source_files[block_function_source_file]["functions"][
+                block_function_name] = {"covered": False,
+                                        "line_number": fn_line_number}
+            # Now lets check the block code
+            # The source code groups have 5 elements:
+            # Function for the statements (optional), Source file for the asm
+            # statements,
+            # line number for the asm statements, asm statements, lookahead
+            # (ignored)
+            source_code_groups = re.findall(SOURCE_PATTERN, block_code,
+                                            re.DOTALL | re.MULTILINE)
+            is_function_block_covered = False
+            # When not present the last function name applies
+            statements_function_name = block_function_name
+            for source_code_group in source_code_groups:
+                if len(source_code_group) != 5:
+                    continue
+                fn_name, source_file, ln, asm_code, _ = source_code_group
+                if not fn_name:
+                    # The statement belongs to the most recent function
+                    fn_name = statements_function_name
+                else:
+                    # Usually in the first iteration fn_name is not empty and
+                    # is the function's name block
+                    statements_function_name = fn_name
+                if statements_function_name in function_list:
+                    # Some of the functions within a block are not defined in
+                    # the function list dump
+                    function_list[statements_function_name]["sources"] = True
+                statements_source_file = remove_workspace(source_file, prefix)
+                if statements_source_file not in source_files:
+                    source_files[statements_source_file] = {"functions": {},
+                                                            "lines": {}}
+                if statements_function_name not in \
+                        source_files[statements_source_file]["functions"]:
+                    fn_line_number = function_line_numbers.get_line_number(
+                        statements_source_file,
+                        statements_function_name)
+                    source_files[statements_source_file]["functions"][
+                        statements_function_name] = \
+                        {"covered": False, "line_number": fn_line_number}
+                if ln not in source_files[statements_source_file]["lines"]:
+                    source_files[statements_source_file]["lines"][ln] = \
+                        {"covered": False, "elf_index": {}}
+                source_file_ln = source_files[statements_source_file]["lines"][
+                    ln]
+                asm_line_groups = re.findall(
+                    r"(?s)([a-fA-F0-9]+):\t(.+?)(?:\n|$)",
+                    asm_code, re.DOTALL | re.MULTILINE)
+                for asm_line in asm_line_groups:
+                    if len(asm_line) != 2:
+                        continue
+                    hex_line_number, opcode = asm_line
+                    dec_address = int(hex_line_number, 16)
+                    times_executed = 0 if dec_address not in self.traces_stats \
+                        else self.traces_stats[dec_address][0]
+                    if times_executed > 0:
+                        is_function_block_covered = True
+                        source_file_ln["covered"] = True
+                        source_files[statements_source_file]["functions"][
+                            statements_function_name]["covered"] = True
+                    if elf_index not in source_file_ln["elf_index"]:
+                        source_file_ln["elf_index"][elf_index] = {}
+                    if dec_address not in \
+                            source_file_ln["elf_index"][elf_index]:
+                        source_file_ln["elf_index"][elf_index][dec_address] = (
+                            opcode, times_executed)
+            source_files[block_function_source_file]["functions"][
+                block_function_name]["covered"] |= is_function_block_covered
+
+    def process_fn_no_sources(self, function_list):
+        """
+        Checks function coverage for functions with no dwarf signature i.e
+         sources.
+
+        :param function_list: Dictionary of functions to be checked
+        """
+        if not FUNCTION_LINES_ENABLED:
+            return  # No source code at the workspace
+        address_seq = sorted(self.traces_stats.keys())
+        for function_name in function_list:
+            # Just check if the start address is in the trace logs
+            covered = function_list[function_name]["start"] in address_seq
+            # Find the source file
+            files = os_command(("grep --include '*.c' --include '*.s' -nrw"
+                                " '{}' {} | cut -d: -f1").format(
+                                    function_name, self.local_workspace))
+            unique_files = set(files.split())
+            sources = []
+            line_number = 0
+            for source_file in unique_files:
+                d = get_function_line_numbers(source_file)
+                if function_name in d:
+                    line_number = d[function_name]
+                    sources.append(source_file)
+            if len(sources) > 1:
+                logger.warning("'{}' declared in {} files:{}".format(
+                    function_name, len(sources),
+                    ", ".join(sources)))
+            elif len(sources) == 1:
+                source_file = remove_workspace(sources[0],
+                                               self.local_workspace)
+                if source_file not in self.source_files_coverage:
+                    self.source_files_coverage[source_file] = {"functions": {},
+                                                               "lines": {}}
+                if function_name not in \
+                        self.source_files_coverage[source_file]["functions"] or \
+                        covered:
+                    self.source_files_coverage[source_file]["functions"][
+                        function_name] = {"covered": covered,
+                                          "line_number": line_number}
+            else:
+                logger.warning("Function '{}' not found in sources.".format(
+                    function_name))
+
+
+json_conf_help = """
+Produces an intermediate json layer for code coverage reporting
+using an input json configuration file.
+
+Input json configuration file format:
+{
+    "configuration":
+        {
+        "remove_workspace": <true if 'workspace' must be from removed from the
+                                path of the source files>,
+        "include_assembly": <true to include assembly source code in the
+                            intermediate layer>
+        },
+    "parameters":
+        {
+        "objdump": "<Path to the objdump binary to handle dwarf signatures>",
+        "readelf: "<Path to the readelf binary to handle dwarf signatures>",
+        "sources": [ <List of source code origins, one or more of the next
+                        options>
+                    {
+                    "type": "git",
+                    "URL":  "<URL git repo>",
+                    "COMMIT": "<Commit id>",
+                    "REFSPEC": "<Refspec>",
+                    "LOCATION": "<Folder within 'workspace' where this source
+                                is located>"
+                    },
+                    {
+                    "type": "http",
+                    "URL":  <URL link to file>",
+                    "COMPRESSION": "xz",
+                    "LOCATION": "<Folder within 'workspace' where this source
+                                is located>"
+                    }
+                ],
+        "workspace": "<Workspace folder where the source code was located to
+                        produce the elf/axf files>",
+        "output_file": "<Intermediate layer output file name and location>",
+        "metadata": {<Metadata objects to be passed to the intermediate json
+                    files>}
+        },
+    "elfs": [ <List of elf files to be traced/parsed>
+            {
+                    "name": "<Full path name to elf/axf file>",
+                    "traces": [ <List of trace files to be parsed for this
+                                elf/axf file>
+                                "Full path name to the trace file,"
+                              ]
+                }
+        ]
+}
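+
+Example configuration (illustrative values only):
+{
+    "configuration":
+        {
+        "remove_workspace": true,
+        "include_assembly": true
+        },
+    "parameters":
+        {
+        "objdump": "aarch64-none-elf-objdump",
+        "readelf": "aarch64-none-elf-readelf",
+        "sources": [
+                    {
+                    "type": "git",
+                    "URL": "https://git.example.org/project.git",
+                    "COMMIT": "<commit id>",
+                    "REFSPEC": "",
+                    "LOCATION": "project"
+                    }
+                ],
+        "workspace": "/workspace/source/",
+        "output_file": "output_file.json"
+        },
+    "elfs": [
+            {
+                "name": "/workspace/build/bl31.elf",
+                "traces": ["/workspace/traces/trace.log"]
+            }
+        ]
+}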
+"""
+OBJDUMP = None
+READELF = None
+FUNCTION_LINES_ENABLED = None
+SOURCE_PATTERN = (r'(?s)([a-zA-Z0-9_]+)?(?:\(\):\n)?(^/.+?):([0-9]+)'
+                  r'(?: \(.+?\))?\n(.+?)(?=\n/|\n$|([a-zA-Z0-9_]+\(\):))')
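+# SOURCE_PATTERN capture groups, in order: optional function name, source
+# file path, line number, asm statements, and a trailing lookahead capture
+# that the consumer ignores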
+
+
+def main():
+    global OBJDUMP
+    global READELF
+    global FUNCTION_LINES_ENABLED
+
+    parser = argparse.ArgumentParser(epilog=json_conf_help,
+                                     formatter_class=RawTextHelpFormatter)
+    parser.add_argument('--config-json', metavar='PATH',
+                        dest="config_json", default='config_file.json',
+                        help='JSON configuration file', required=True)
+    parser.add_argument('--local-workspace', default="",
+                        help=('Local workspace folder where source code files'
+                              ' and folders resides'))
+    args = parser.parse_args()
+    try:
+        with open(args.config_json, 'r') as f:
+            config = json.load(f)
+    except Exception as ex:
+        print("Error at opening and processing JSON: {}".format(ex))
+        return
+    # Setting toolchain binary tools variables
+    OBJDUMP = config['parameters']['objdump']
+    READELF = config['parameters']['readelf']
+    # Check that the toolchain binaries are installed
+    os_command("{} --version".format(OBJDUMP))
+    os_command("{} --version".format(READELF))
+
+    if args.local_workspace != "":
+        # Checking ctags installed
+        try:
+            os_command("ctags --version")
+        except BaseException:
+            print("Warning!: ctags not installed/working function line numbers\
+                    will be set to 0. [{}]".format(
+                "sudo apt install exuberant-ctags"))
+        else:
+            FUNCTION_LINES_ENABLED = True
+
+    pp = PostProcessCC(config, args.local_workspace)
+    pp.process()
+
+
+if __name__ == '__main__':
+    logging.basicConfig(filename='intermediate_layer.log', level=logging.DEBUG,
+                        format=('%(asctime)s %(levelname)s %(name)s '
+                                '%(message)s'))
+    logger = logging.getLogger(__name__)
+    start_time = time.time()
+    main()
+    elapsed_time = time.time() - start_time
+    print("Elapsed time: {}s".format(elapsed_time))
diff --git a/coverage-tool/coverage-reporting/merge.py b/coverage-tool/coverage-reporting/merge.py
new file mode 100755
index 0000000..e3d9d65
--- /dev/null
+++ b/coverage-tool/coverage-reporting/merge.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+###############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+###############################################################################
+
+###############################################################################
+# FILE: merge.py
+#
+# DESCRIPTION: Merge two or more .info and json files, sanitizing source file
+#              paths.
+#              If different .info files contain the same source code duplicated
+#              in different directories, we use the absolute paths of the
+#              first .info file.
+#
+###############################################################################
+
+
+import os
+import sys
+import argparse
+from argparse import RawTextHelpFormatter
+import subprocess
+import json
+
+
+# Define an argument parser using the argparse library
+parser = argparse.ArgumentParser(epilog="""Example of usage:
+python3 merge.py -a coverage_1.info -a coverage_2.info -o coverage_merge.info \
+-j input_file1.json -j input_file2.json -m merge_file.json
+
+It is possible to merge any number of files at once.
+If metadata json files are defined then they must pair with their
+corresponding info file, i.e. have the same name.
+If a local workspace is defined then the paths in the info files will
+be translated from the original test workspace to the local workspace
+to enable the usage of LCOV, but the original files will be kept intact.
+By default, the output file must be a new file.
+To overwrite an existing file, use the "--force" option.
+
+Note: the user is expected to merge .info files referring to the same project.
+If merging .info files from different projects, LCOV can be exploited directly
+using a command such as "lcov -rc lcov_branch_coverage=1 -a coverage_1.info \
+-a coverage_2.info -o coverage_merge.info."
+""", formatter_class=RawTextHelpFormatter)
+requiredNamed = parser.add_argument_group('required named arguments')
+requiredNamed.add_argument("-a", "--add-file",
+                           help="Input info file to be merged.",
+                           action='append', required=True)
+requiredNamed.add_argument("-o", "--output",
+                           help="Name of the output info (merged) file.",
+                           required=False)
+parser.add_argument("-j", "--json-file", action='append',
+                    help="Input json file to be merged.")
+parser.add_argument("-m", "--output-json",
+                    help="Name of the output json (merged) file.")
+parser.add_argument("--force", dest='force', action='store_true',
+                    help="force overwriting of output file.")
+parser.add_argument("--local-workspace", dest='local_workspace',
+                    help='Local workspace where source files reside.')
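+# Note: info and json inputs are paired by basename stem, e.g. a
+# hypothetical "proj.info" pairs with "proj.json".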
+
+options = parser.parse_args(sys.argv[1:])
+# At least two .info files are expected
+if len(options.add_file) < 2:
+    print('Error: too few input files.\n')
+    sys.exit(1)
+# The same number of info and json files expected
+if options.json_file:
+    if len(options.json_file) != len(options.add_file):
+        print('Error: unmatched number of info and json files.\n')
+        sys.exit(1)
+
+file_groups = []
+info_files_to_merge = []
+# Check if files exist
+for file_name in options.add_file:
+    print("Merging '{}'".format(file_name))
+    if not os.path.isfile(file_name):
+        print('Error: file "' + file_name + '" not found.\n')
+        sys.exit(1)
+    if not file_name.endswith('.info'):
+        print('Error: file "' + file_name +
+              '" has wrong extension. Expected .info file.\n')
+        sys.exit(1)
+    if file_name in info_files_to_merge:
+        print("Error: Duplicated info file '{}'".format(file_name))
+        sys.exit(1)
+    info_files_to_merge.append(file_name)
+    file_group = {"info": file_name, "locations": [], "json": ""}
+    info_name = os.path.basename(file_name).split(".")[0]
+    if options.json_file:
+        json_name = [i for i in options.json_file
+                     if os.path.basename(i).split(".")[0] == info_name]
+        if not json_name:
+            print("Umatched json file name for '{}'".format(file_name))
+            sys.exit(1)
+        json_name = json_name.pop()
+        if not json_name.endswith('.json'):
+            print('Error: file "' + json_name +
+                  '" has wrong extension. Expected .json file.\n')
+            sys.exit(1)
+        if not os.path.isfile(json_name):
+            print('Error: file "' + json_name + '" not found.\n')
+            sys.exit(1)
+        # Now we have to extract the location folders for each info
+        # this is needed if we want translation to local workspace
+        file_group["json"] = json_name
+        with open(json_name) as json_file:
+            json_data = json.load(json_file)
+        locations = []
+        for source in json_data["configuration"]["sources"]:
+            locations.append(source["LOCATION"])
+        file_group["locations"] = locations
+    file_groups.append(file_group)
+
+# Check the extension of the output file
+if not options.output.endswith('.info'):
+    print('Error: file "' + options.output +
+          '" has wrong extension. Expected .info file.\n')
+    sys.exit(1)
+
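+# Illustrative sketch of the translation below, with hypothetical paths:
+# given a trace line "SF:/jenkins/ws/build/src/main.c", a detected common
+# prefix "/jenkins/ws/build" and --local-workspace "/home/user/ws", the
+# rewritten line is "SF:/home/user/ws/src/main.c".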
+if options.local_workspace is not None:
+    # Translation from test to local workspace
+    for i, info_file_name in enumerate(info_files_to_merge):
+        print("Translating workspace for '{}'...".format(info_file_name))
+        with open(info_file_name, "r") as info_file:
+            info_lines = info_file.readlines()
+        common_prefix = os.path.normpath(
+            os.path.commonprefix([line[3:] for line in info_lines
+                                  if 'SF:' in line]))
+        temp_file = 'temporary_' + str(i) + '.info'
+        with open(temp_file, "w+") as f:
+            for line in info_lines:
+                cf = common_prefix
+                if os.path.basename(common_prefix) in file_groups[i]["locations"]:
+                    cf = os.path.dirname(common_prefix)
+                f.write(line.replace(cf, options.local_workspace))
+        info_files_to_merge[i] = temp_file  # Replace info file to be merged
+
+# Merge json files
+if options.json_file:
+    json_merged_list = []
+    for json_file in options.json_file:
+        with open(json_file) as f:
+            data = json.load(f)
+        for source in data['configuration']['sources']:
+            if source not in json_merged_list:
+                json_merged_list.append(source)
+    json_merged = {'configuration': {'sources': json_merged_list}}
+    with open(options.output_json, 'w') as f:
+        json.dump(json_merged, f)
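+# e.g. (illustrative) two metadata files listing overlapping sources
+# produce a single deduplicated list:
+# {"configuration": {"sources": [<source 1>, <source 2>, ...]}}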
+
+
+# Use LCOV's merging capabilities
+# Example of LCOV usage: lcov -rc lcov_branch_coverage=1 -a coverage_1.info \
+# -a coverage_2.info -o coverage_merge.info
+command = ['lcov', '-rc', 'lcov_branch_coverage=1']
+
+for file_name in info_files_to_merge:
+    command.append('-a')
+    command.append(file_name)
+command.append('-o')
+command.append(options.output)
+
+ret = subprocess.call(command)
+
+# Delete the temporary files
+if options.local_workspace is not None:
+    for f in info_files_to_merge:
+        os.remove(f)
+
+# Propagate the LCOV exit status so callers can detect a failed merge
+sys.exit(ret)
diff --git a/coverage-tool/coverage-reporting/merge.sh b/coverage-tool/coverage-reporting/merge.sh
new file mode 100755
index 0000000..354dbc8
--- /dev/null
+++ b/coverage-tool/coverage-reporting/merge.sh
@@ -0,0 +1,417 @@
+#!/usr/bin/env bash
+
+##############################################################################
+# Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+##############################################################################
+
+#==============================================================================
+# FILE: merge.sh
+#
+# DESCRIPTION: Wrapper to merge intermediate json files and LCOV trace .info
+# files.
+#==============================================================================
+
+set -e
+#################################################################
+# Function to manipulate json objects.
+# The json object properties can be accessed through "." separated
+# property names. There are special characters that define a function
+# over a given property value:
+# If the qualifier list starts with '-' then it returns the length of the
+# json array defined by the qualifiers.
+# If the qualifier list starts with '*' then the resulting json value
+# is returned without the enclosing double quotes.
+# If a property name starts with "?" then it queries whether that
+# property exists within the json object.
+# Globals:
+#   None
+# Arguments:
+#   1-Json string that describes the json object
+#   2-String of '.' separated qualifiers to access properties
+#       within the json object
+#   3-Optional default value for the sought property
+# Outputs:
+#   None
+################################################################
+get_json_object() {
+  export _json_string="$1"
+  export _qualifiers="$2"
+  export _default="$3"
+  python3 - << EOT
+import os
+import json
+import sys
+
+_json_string = os.getenv("_json_string", "")
+_qualifiers = os.getenv("_qualifiers", "")
+_default = os.getenv("_default", "")
+try:
+    data = json.loads(_json_string)
+except Exception as ex:
+    print("Error decoding json string:{}".format(ex))
+    sys.exit(-1)
+ptr = data
+if _qualifiers and _qualifiers[0] in ['-', '*']:
+    cmd = _qualifiers[0]
+    _qualifiers = _qualifiers[1:]
+else:
+    cmd = ""
+for _name in _qualifiers.split("."):
+    if _name in ptr:
+        ptr = ptr[_name]
+    elif _name.isdigit() and int(_name) < len(ptr):
+        ptr = ptr[int(_name)]
+    elif _name.startswith("?"):
+        print(_name[1:] in ptr)
+        sys.exit(0)
+    elif _default:
+        print(_default)
+        sys.exit(0)
+    else:
+        print("'{}' is not in the json object".format(_name))
+        sys.exit(-1)
+if cmd == "-":
+    # return len of the json array
+    print(len(ptr))
+elif cmd == "*":
+    # Remove the enclosing double quotes
+    string = json.dumps(ptr)
+    if string.startswith('"') and string.endswith('"'):
+        string = string[1:-1]
+    print(string)
+else:
+    print(json.dumps(ptr))
+EOT
+}
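+
+# Illustrative calls (hypothetical json; expected output after '=>'):
+#   get_json_object '{"files": [{"id": "p1"}]}' "-files"        => 1
+#   get_json_object '{"files": [{"id": "p1"}]}' "*files.0.id"   => p1
+#   get_json_object '{"files": [{"id": "p1"}]}' "files.0.?id"   => True
+#   get_json_object '{"files": [{"id": "p1"}]}' "files.0.x" "d" => d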
+
+#################################################################
+# Convert a relative path to absolute path
+# Globals:
+#   None
+# Arguments:
+#   1-Path to be converted
+# Outputs:
+#   Absolute path
+################################################################
+get_abs_path() {
+  path="$1"
+  echo "$(cd $(dirname $path) && echo "$(pwd -P)"/$(basename $path))"
+}
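+
+# e.g. get_abs_path "./input/report.info" => "<cwd>/input/report.info"
+# (illustrative path)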
+
+#################################################################
+# Clone the source files
+# Globals:
+#   None
+# Arguments:
+#   1-Json file with the sources to be cloned
+#   2-Folder where to clone the sources
+# Outputs:
+#   None
+################################################################
+clone_repos() {
+  export OUTPUT_JSON="$1"
+  export CSOURCE_FOLDER="${2:-$LOCAL_WORKSPACE}"
+
+  cd $DIR # Run from the folder where this script resides
+python3 - << EOT
+import os
+import clone_sources
+
+output_file = os.getenv('OUTPUT_JSON', 'output_file.json')
+source_folder = os.getenv('CSOURCE_FOLDER', 'source')
+try:
+    r = clone_sources.CloneSources(output_file)
+    r.clone_repo(source_folder)
+except Exception as ex:
+    print(ex)
+EOT
+  cd -
+}
+
+#################################################################
+# Get a file (download or copy) defined in the json object
+# Globals:
+#   None
+# Arguments:
+#   1-Json object that defines the locations of the info and json
+#       files
+#   2-Folder to save the info and json files
+#   3-Name of the shell variable in which the absolute path of the
+#       fetched file is stored (reference argument)
+# Outputs:
+#   None
+################################################################
+get_file() {
+  json_object="$1"
+  where="$2"
+  var_name="${3:-param_cloned}" # Defaults to global var
+
+  local _type=$(get_json_object "$json_object" "type")
+  local _origin=$(get_json_object "$json_object" "*origin")
+  local _compression=$(get_json_object "$json_object" "*compression" None)
+  local fname=""
+  local cloned_file=""
+  local full_filename=$(basename -- "$_origin")
+  local extension="${full_filename##*.}"
+  local filename="${full_filename%.*}"
+
+  if [ "$_type" = '"http"' ];then
+    fname="$where.$extension" # Same filename as folder
+    rm $where/$fname &>/dev/null || true
+    wget -o error.log $_origin -O $where/$fname || (
+      cat error.log && exit 1)
+    cloned_file="$(get_abs_path $where/$fname)"
+  elif [ "$_type" = '"bundle"' ];then
+    # Check file exists at origin, i.e. was unbundled before
+    fname="$_origin"
+    if [ -f "$where/$fname" ];then
+        cloned_file="$(get_abs_path $where/$fname)"
+    fi
+  elif [ "$_type" = '"file"' ];then
+	if [[ "$_origin" = http* ]]; then
+		echo "$_origin looks like 'http' rather than 'file' please check..."
+		exit -1
+	fi
+    fname="$where.$extension" # Same filename as folder
+    cp -f $_origin $where/$fname
+    cloned_file="$(get_abs_path $where/$fname)"
+  else
+    echo "Error unsupported file type:$_type.... Aborting."
+    exit -1
+  fi
+  if [ "$_compression" = "tar.xz" ];then
+    cd $where
+    pwd
+    tar -xzf $fname
+    rm -f $fname
+    cd -
+  fi
+  eval "${var_name}=${cloned_file}"
+}
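+
+# Illustrative call (mirrors get_info_json_files below): fetch the "info"
+# file described in the json object $_file into $folder and return its
+# absolute path in the variable named "info_file":
+#   get_file "$(get_json_object "$_file" "info")" $folder info_file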
+
+#####################################################################
+# Get (download/copy) info and json files from the input json file
+# Globals:
+#   merge_input_json_file: Input json file with locations of info
+#                          and intermediate json files to be merged.
+#   input_folder: Folder to put info and json files to be merged
+# Arguments:
+#   None
+# Outputs:
+#   None
+###################################################################
+get_info_json_files() {
+  json_string="$(cat $merge_input_json_file)"
+  nf=$(get_json_object "$json_string" "-files")
+  rm -rf $input_folder > /dev/null || true
+  mkdir -p $input_folder
+  for f in $(seq 0 $(($nf - 1)));
+  do
+    pushd $input_folder > /dev/null
+    _file=$(get_json_object "$json_string" "files.$f")
+    folder=$(get_json_object "$_file" "*id")
+    echo "Geting files from project '$folder' into '$input_folder'..."
+    mkdir -p $folder
+    bundles=$(get_json_object "$_file" "bundles" None)
+    if [ "$bundles" != "None" ];then
+      nb=$(get_json_object "$_file" "-bundles")
+      for n in $(seq 0 $(($nb - 1)));
+      do
+        get_file "$(get_json_object "$bundles" "$n")" $folder
+      done
+    fi
+    get_file "$(get_json_object "$_file" "config")" $folder config_json_file
+    get_file "$(get_json_object "$_file" "info")" $folder info_file
+    popd > /dev/null
+  done
+}
+
+#################################################################
+# Merge json and info files and generate branch coverage report
+# Globals:
+#   output_coverage_file: Location and name for merge coverage info
+#   output_json_file: Location and name for merge json output
+#   input_folder: Location where reside json and info files
+#   LOCAL_WORKSPACE: Local workspace folder with the source files
+# Arguments:
+#   None
+# Outputs:
+#   Output merge coverage file
+#   Output merge json file
+################################################################
+merge_files() {
+# Merge info and json files
+  local lc=" "
+  if [ -n "$LOCAL_WORKSPACE" ];then
+    # Translation to be done in the info files to local workspace
+    lc=" --local-workspace $LOCAL_WORKSPACE"
+  fi
+  # merge.py must reside in the same folder as this script
+  python3 ${DIR}/merge.py \
+      $(find $input_folder -name "*.info" -exec echo "-a {}" \;) \
+      $(find $input_folder -name "*.json" -exec echo "-j {}" \;) \
+      -o $output_coverage_file \
+      -m $output_json_file \
+      $lc
+
+}
+
+
+#################################################################
+# Print scripts usage
+# Arguments:
+#   None
+# Outputs:
+#   Prints to stdout script usage
+################################################################
+usage() {
+  clear
+  echo "Usage:"
+  echo "merge -h              Display this help message."
+  echo "-j <input json file>  Input json file(info and intermediate json files to be merged)."
+  echo "-l <report folder>    Folder for branch coverage report. Defaults to ./lcov_folder."
+  echo "-i <Path>             Folder to copy/download info and json files. Defaults to ./input."
+  echo "-w <Folder>           Local workspace folder for source files."
+  echo "-o <name>             Name of the merged info file. Defaults to ./coverage_merge.info"
+  echo "-m <name>             Name of the merged metadata json file. Defaults to ./merge_output.json"
+  echo "-c                    If it is set, sources from merged json files will be cloned/copied to local workspace folder."
+  echo "$help_message"
+}
+
+help_message=$(cat <<EOF
+
+# The script that merges the info data (code coverage) and json metadata
+# (intermediate layer) needs as input a json file with the following
+# properties:
+# files: array of objects that describe the type of file/project to be
+# merged.
+#   id: Unique identifier (project) associated to the info and
+#       intermediate json files
+#   config: Intermediate json file
+#       type: Type of storage for the file. (http or file)
+#       origin: Location (url or folder) of the file
+#   info:  Info file
+#       type: Type of storage for the file. (http or file)
+#       origin: Location (url or folder) of the file
+# Example:
+{ "files" : [
+                {
+                    "id": "<project 1>",
+                    "config":
+                        {
+                            "type": "http",
+                            "origin": "<URL of json file for project 1>"
+                        },
+                    "info":
+                        {
+                            "type": "http",
+                            "origin": "<URL of info file for project 1>"
+                        }
+                },
+                {
+                    "id": "<project 2>",
+                    "config":
+                        {
+                            "type": "http",
+                            "origin": "<URL of json file for project 2>"
+                        },
+                    "info":
+                        {
+                            "type": "http",
+                            "origin": "<URL of info file for project 2>"
+                        }
+                },
+                .
+                .
+                .
+        ]
+}
+EOF
+)
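+
+# Example invocation (hypothetical file and folder names):
+#   ./merge.sh -j merge_input.json -w ./workspace -c -l ./lcov_report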
+
+clear
+# Local workspace folder to contain source files
+LOCAL_WORKSPACE=""
+# If true, sources from the merged json file will be cloned/copied
+# into the local workspace folder
+CLONE_SOURCES=false
+# Location of the input json file that describes the info and json
+# files to be merged in order to produce a report
+merge_input_json_file=""
+# Folder to download json and info files
+input_folder="./input_folder"
+# Folder to put the reports
+LCOV_FOLDER="./lcov_folder"
+# File name for merge coverage info
+output_coverage_file="./coverage_merge.info"
+# File name for merge json output
+output_json_file="./merge_output.json"
+while getopts ":hj:o:l:w:i:cm:" opt; do
+  case ${opt} in
+    h )
+      usage
+      exit 0
+      ;;
+    w )
+      LOCAL_WORKSPACE=$(cd $OPTARG; pwd)
+      ;;
+    i )
+      input_folder=$OPTARG
+      ;;
+    c )
+      CLONE_SOURCES=true
+      ;;
+    j )
+      merge_input_json_file=$OPTARG
+      ;;
+    l )
+      LCOV_FOLDER=$OPTARG
+      ;;
+    o )
+      output_coverage_file=$OPTARG
+      ;;
+    m )
+      output_json_file=$OPTARG
+      ;;
+    \? )
+      echo "Invalid option: $OPTARG" 1>&2
+      usage
+      exit 1
+      ;;
+    : )
+      echo "Invalid option: $OPTARG requires an argument" 1>&2
+      usage
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND -1))
+if [ -z "$merge_input_json_file" ]; then
+  echo "Input json file required"
+  usage
+  exit -1
+fi
+if [ -z "$LOCAL_WORKSPACE" ] && [ $CLONE_SOURCES = true ]; then
+	echo "Need to define a local workspace folder to clone/copy sources!"
+	exit -1
+fi
+# Getting the script folder where other script files must reside, i.e
+# merge.py, clone_sources.py
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+input_folder="$(get_abs_path $input_folder)"
+LCOV_FOLDER="$(get_abs_path  $LCOV_FOLDER)"
+output_coverage_file="$(get_abs_path $output_coverage_file)"
+output_json_file="$(get_abs_path $output_json_file)"
+param_cloned=""
+get_info_json_files
+merge_files
+if [ $CLONE_SOURCES = true ];then
+  clone_repos $output_json_file
+fi
+# Generate branch coverage report
+genhtml --branch-coverage $output_coverage_file \
+    --output-directory $LCOV_FOLDER