Sync Coverity scripts with internal CI
Sync the scripts with platform-ci commit:
539c151d0cd99a5e6ca6c0e6966f6d8579fe864e
Signed-off-by: Zelalem <zelalem.aweke@arm.com>
Change-Id: I260c3490cc5079b66b735b03116af56501fa0642
diff --git a/script/coverity-Makefile b/script/coverity-Makefile
new file mode 100644
index 0000000..a96cc82
--- /dev/null
+++ b/script/coverity-Makefile
@@ -0,0 +1,141 @@
+#
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# NOTE: stdout is a log file; &3 prints to the terminal
+# NOTE: this Makefile must be run from the workspace directory; the coverity/
+# paths below are relative to it
+
+# Remove all default rules
+.SUFFIXES :
+
+cov_dir=coverity
+cov_config=${cov_dir}/config/config.xml
+report=--preview-report-v2 "${cov_dir}/report.json"
+
+golden_cov=--dir "${cov_dir}/golden"
+golden_repo=${cov_dir}/golden-repo
+golden_snapshot=${cov_dir}/golden-snapshot
+golden_analyze=${cov_dir}/golden.txt
+golden_hash=${cov_dir}/golden.hash
+
+branch_cov=--dir "${cov_dir}/branch"
+branch_repo=${cov_dir}/branch-repo
+branch_snapshot=${cov_dir}/branch-snapshot
+branch_analyze=${cov_dir}/branch.txt
+branch_hash=${cov_dir}/branch.hash
+
+stream_name?=${BUILD_CONFIG}
+
+cov-common-args= --host "${coverity_host}" --auth-key-file "${auth_file}"
+cov-manage-args= ${cov-common-args} --ssl --port "${coverity_port}"
+cov-manage=cov-manage-im ${cov-manage-args} --mode
+cov-commit-args= ${cov-common-args} --https-port "${coverity_port}"
+cov-commit=cov-commit-defects ${cov-commit-args} --stream "${stream_name}"
+cov-build=MAKEFLAGS= cov-build --config ${cov_config}
+cov-analyze=cov-analyze --verbose 0
+cov-errors=cov-format-errors
+
+# Make idiom to force a rule to be run. This works because the empty rule never
+# creates its target, so every rule that depends on it is always re-run
+force: ;
+
+# This is forced because, on the second invocation, make will already have the
+# ${golden_hash} file.
+golden-setup: ${golden_hash} force
+ git clone -q "${tf_root}" "${golden_repo}" -b "${golden_ref}"
+ git -C "${golden_repo}" checkout `cat ${golden_hash}` 2>&1
+ echo "golden: ${golden_url} ${golden_ref}" >&3
+
+# Note: if ${golden_ref} (e.g. an "integration" branch) is not available
+# locally, then we're probably the CI and have a shallow clone
+${golden_hash}:
+ git -C "${tf_root}" rev-parse "${golden_ref}" >/dev/null 2>/dev/null \
+ || ( git -C "${tf_root}" fetch "${golden_url}" "${golden_ref}" \
+ && git -C "${tf_root}" branch "${golden_ref}" FETCH_HEAD \
+ && git -C "${tf_root}" fetch -q --unshallow "${golden_url}" )
+ git -C "${tf_root}" merge-base HEAD "${golden_ref}" > $@
+ echo "golden: `cat $@`" >&3
+
+ifneq (${cov_force_commit}, 1)
+# The following rule runs this makefile recursively, with ${cov_force_commit}=1,
+# when a suitable snapshot could not be found. This will cause the else branch
+# of this if to be taken and a new snapshot will be created on the server.
+${golden_snapshot}: ${golden_hash}
+ "${ci_root}/script/get_latest_snapshot.py" ${cov-commit-args} \
+ --description "${description}" --version "`cat ${golden_hash}`" \
+ "${stream_name}" > "${golden_snapshot}" \
+ || $(MAKE) ${golden_snapshot} cov_force_commit=1 \
+ -f ${ci_root}/script/coverity-Makefile
+ echo "golden: snapshot ID `cat ${golden_snapshot}` exists" >&3
+else
+${golden_snapshot}: stream-setup ${golden_analyze} ${golden_hash} golden-setup
+ ${cov-commit} ${golden_cov} \
+ --description "${description}" \
+ --version "`cat ${golden_hash}`" \
+ --snapshot-id-file "${golden_snapshot}"
+ echo "golden: new snapshot ID: `cat ${golden_snapshot}`" >&3
+endif
+
+# Create a copy of the user's repo to avoid cleaning their working copy
+branch-setup:
+ git clone -q "${tf_root}" "${branch_repo}"
+ rsync -a --exclude=".git" --exclude "**.o" --exclude "**.d" \
+ "${tf_root}/" "${branch_repo}"
+ifneq (${dont_clean}, 1)
+ MAKEFLAGS= make -C "${branch_repo}" distclean
+endif
+
+${branch_hash}: branch-setup
+ git -C ${branch_repo} show -q --format=%H > $@
+ echo "branch: `cat $@`" >&3
+
+# The stream may not exist for every possible build combination, so try to
+# create it. This is allowed to fail; failure indicates that the stream
+# already exists.
+stream-setup:
+ -${cov-manage} streams --add --set "name:${stream_name}" \
+ && ${cov-manage} projects --name "Arm Trusted Firmware" --update \
+ --insert "stream:${stream_name}"
+
+${cov_config}:
+ cov-configure --comptype gcc --template --compiler "${cov_compiler}" \
+ --config "${cov_config}"
+
+${golden_analyze}: golden-setup ${cov_config}
+ ${cov-build} ${golden_cov} make -C ${golden_repo} ${SUBMAKE}
+ ${cov-analyze} ${golden_cov} ${cov_options} \
+ --strip-path "$(realpath ${golden_repo})" > ${golden_analyze}
+
+${branch_analyze}: branch-setup ${cov_config} ${branch_hash}
+ ${cov-build} ${branch_cov} make -C ${branch_repo} ${SUBMAKE}
+ ${cov-analyze} ${branch_cov} ${cov_options} \
+ --strip-path "$(realpath ${branch_repo})" > ${branch_analyze}
+
+branch-cov-commit-defects: stream-setup ${branch_analyze} ${branch_hash}
+ ${cov-commit} ${branch_cov} --description "${description}" \
+ --version "`cat ${branch_hash}`" \
+ --snapshot-id-file "${branch_snapshot}"
+ echo "branch: new snapshot ID: `cat ${branch_snapshot}`" >&3
+
+
+ifdef cov_force_commit
+full-commit-defects-dep = branch-cov-commit-defects
+else
+full-commit-defects-dep =
+endif
+
+${cov_dir}/diff.json: ${branch_analyze} ${golden_snapshot}
+ ${cov-commit} ${branch_cov} --preview-report-v2 $@ \
+ --comparison-snapshot-id "`cat ${golden_snapshot}`"
+
+${cov_dir}/full.json: ${full-commit-defects-dep} ${branch_analyze}
+ ${cov-errors} ${branch_cov} --json-output-v7 ${cov_dir}/full.json
+
+%-defects.txt: ${cov_dir}/%.json
+ -python3 ${ci_root}/script/coverity_parser.py $^ \
+ --output defects.json --totals defects-summary.txt > $@
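+# e.g. "diff-defects.txt" is generated from "${cov_dir}/diff.json" and
+# "full-defects.txt" from "${cov_dir}/full.json" via the pattern rule above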
+
+
+
+branch-report-full: full-defects.txt
+branch-report-compare: diff-defects.txt
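+
+# Illustrative invocation (normally driven by coverity_wrapper.sh; values shown
+# are placeholders, and the remaining variables used above come from the
+# environment):
+#   MAKEFLAGS= SUBMAKE="PLAT=fvp DEBUG=1 all" make -r -j -C "$workspace" \
+#       -f "$ci_root/script/coverity-Makefile" auth_file=... golden_url=... \
+#       golden_ref=master description="local scan" branch-report-compare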
diff --git a/script/coverity_parser.py b/script/coverity_parser.py
index 5048348..7b428b7 100644
--- a/script/coverity_parser.py
+++ b/script/coverity_parser.py
@@ -1,11 +1,12 @@
#!/usr/bin/env python3
#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import argparse
+import collections
import json
import re
import shutil
@@ -108,6 +109,16 @@
"description": orig_issue["mainEventDescription"]
}
+def _new_issue_v7(cid, checker, issue):
+ return {
+ "cid": cid,
+ "file": issue["strippedFilePathname"],
+ "line": issue["lineNumber"],
+ "checker": checker,
+ "classification": _classify_checker(checker),
+ "description": issue["eventDescription"],
+ }
+
def _cls_string(issue):
cls = issue["classification"]
@@ -123,30 +134,68 @@
str(i["cid"]).zfill(5))
-# Iterate through all issues that are not ignored. If show_all is set, only
-# issues that are not in the comparison snapshot are returned.
-def iter_issues(path, show_all=False):
- with open(path, encoding="utf-8") as fd:
- report = json.load(fd)
- # Unconditional filter
- filters = [lambda i: ((i["triage"]["action"] != "Ignore") and
- (i["occurrences"][0]["checker"] not in _rule_exclusions))]
- # Whether we need diffs only
- if not show_all:
- # Pick only issues that are not present in comparison snapshot
- filters.append(lambda i: not i["presentInComparisonSnapshot"])
+class Issues(object):
+ """An iterator over issue events that collects a summary
- # Pick issue when all filters are true
- filter_func = lambda i: all([f(i) for f in filters])
+ After using this object as an iterator, the totals member will contain a
+ dict that maps defect types to their totals, and a "total" key with the
+ total number of defects in this scan.
+ """
+ def __init__(self, path, show_all):
+ self.path = path
+ self.show_all = show_all
+ self.iterated = False
+ self.totals = collections.defaultdict(int)
+ self.gen = None
- # Top-level is a group of issues, all sharing a common CID
- for issue_group in filter(filter_func, report["issueInfo"]):
- # Pick up individual occurrence of the CID
- for occurrence in issue_group["occurrences"]:
- yield _new_issue(issue_group["cid"], occurrence)
+ def iter_issues_v1(self, report):
+ # Unconditional filter
+ filters = [lambda i: ((i["triage"]["action"] != "Ignore") and
+ (i["occurrences"][0]["checker"] not in _rule_exclusions))]
+ # Whether we need diffs only
+ if not self.show_all:
+ # Pick only issues that are not present in comparison snapshot
+ filters.append(lambda i: not i["presentInComparisonSnapshot"])
+
+ # Pick issue when all filters are true
+ filter_func = lambda i: all([f(i) for f in filters])
+
+ # Top-level is a group of issues, all sharing a common CID
+ for issue_group in filter(filter_func, report["issueInfo"]):
+            # Count one defect per CID, classified by the checker of its
+            # first occurrence
+            self.totals[_classify_checker(issue_group["occurrences"][0]["checker"])] += 1
+            self.totals["total"] += 1
+            # Pick up individual occurrences of the CID
+            for occurrence in issue_group["occurrences"]:
+                yield _new_issue(issue_group["cid"], occurrence)
+
+ def iter_issues_v7(self, report):
+ # TODO: filter by triage and action
+ f = lambda i: i["checkerName"] not in _rule_exclusions
+ for issue_group in filter(f, report["issues"]):
+ self.totals[_classify_checker(issue_group["checkerName"])] += 1
+ self.totals["total"] += 1
+ for event in issue_group["events"]:
+ yield _new_issue_v7(
+ issue_group.get("cid"),
+ issue_group["checkerName"],
+ event
+ )
+
+ def _gen(self):
+ with open(self.path, encoding="utf-8") as fd:
+ report = json.load(fd)
+ if report.get("formatVersion", 0) >= 7:
+ return self.iter_issues_v7(report)
+ else:
+ return self.iter_issues_v1(report)
+
+ def __iter__(self):
+ if self.gen is None:
+ self.gen = self._gen()
+ yield from self.gen
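+
+# Illustrative usage (assuming a Coverity report JSON at the given path):
+#
+#   issues = Issues("coverity/diff.json", show_all=False)
+#   for issue in sorted(issues, key=make_key):
+#       print(format_issue(issue))
+#   print(issues.totals["total"])  # totals are populated once iterated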
# Format issue (returned from iter_issues()) as text.
def format_issue(issue):
@@ -169,6 +218,13 @@
</tr>""".format_map(dict(issue, cls=cls, cov_class=cov_class))
+TOTALS_FORMAT = str.strip("""
+TotalDefects: {total}
+MandatoryDefects: {mandatory}
+RequiredDefects: {required}
+AdvisoryDefects: {advisory}
+""")
+
if __name__ == "__main__":
parser = argparse.ArgumentParser()
@@ -176,13 +232,15 @@
action="store_const", const=True, help="List all issues")
parser.add_argument("--output",
help="File to output filtered defects to in JSON")
+ parser.add_argument("--totals",
+ help="File to output total defects in flat text")
parser.add_argument("json_report")
opts = parser.parse_args()
+ issue_cls = Issues(opts.json_report, opts.show_all)
issues = []
- for issue in sorted(iter_issues(opts.json_report, opts.show_all),
- key=lambda i: make_key(i)):
+ for issue in sorted(issue_cls, key=lambda i: make_key(i)):
print(format_issue(issue))
issues.append(issue)
@@ -191,4 +249,8 @@
with open(opts.output, "wt") as fd:
fd.write(json.dumps(issues))
+ if opts.totals:
+ with open(opts.totals, "wt") as fd:
+ fd.write(TOTALS_FORMAT.format_map(issue_cls.totals))
+
sys.exit(int(len(issues) > 0))
diff --git a/script/coverity_wrapper.sh b/script/coverity_wrapper.sh
index 97b5345..626232d 100644
--- a/script/coverity_wrapper.sh
+++ b/script/coverity_wrapper.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -20,27 +20,17 @@
# Coverity analysis involves contacting the server, which have shown to be very
# slow. Depending on the type of analysis performed, we might have to do
# analysis more than once, and doing that in series would only increase the turn
-# around time. To mitigate this, all Coverity commands are saved as small
-# snippets, and are then called from a Makefile. Make take care of running
-# commands in parallel (all this at the expense of readability).
+# around time. To mitigate this, all Coverity commands are called from a
+# Makefile and run in parallel.
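+# The Makefile lives at script/coverity-Makefile in the CI repository; the
+# requested target (typically branch-report-full or branch-report-compare) is
+# selected via $cov_run_type.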
coverity_wrapper() {
local cov_dir="$workspace/coverity"
- local cov_config="$cov_dir/config"
local cov_compiler="${cov_compiler:-${CROSS_COMPILE}gcc}"
- local golden_repo="$cov_dir/golden-repo"
- local golden_snapshot="$cov_dir/golden-snapshot"
-
- local branch_repo="$cov_dir/branch-repo"
- local branch_snapshot="$cov_dir/branch-snapshot"
-
local auth_file="${cov_auth_file:-$ci_root/coverity/tfcibot@$coverity_host}"
- local makefile="$workspace/makefile.cov"
- local snippets_dir="$cov_dir/snippets"
- local stream_name="${BUILD_CONFIG:?}"
+ local makefile="$ci_root/script/coverity-Makefile"
+ local defects_summary="$workspace/defects-summary.txt"
- local ref_arg
local description
local need_compare
@@ -51,282 +41,20 @@
need_compare=1
local golden_url="${cov_golden_url:-$tf_src_repo_url}"
local golden_ref="${cov_golden_ref:-master}"
+ local defects_file="$workspace/diff-defects.txt"
+ else
+ local defects_file="$workspace/full-defects.txt"
fi
if upon "$local_ci"; then
description="$USER-local ${cov_checker:?}"
- # Reference repository can't be shallow
- if [ ! -f "$tf_root/.git/shallow" ]; then
- ref_arg="--reference $tf_root"
- fi
else
description="$JOB_NAME#$BUILD_NUMBER ${cov_checker:?}"
- ref_arg="--reference $project_filer/ref-repos/trusted-firmware"
fi
# Create a stream and assign to Trusted Firmware project
chmod 400 "$auth_file"
- mkdir -p "$snippets_dir"
- cat <<EOF >"$makefile"
-SHELL := /bin/bash
-
-define run-snippet
-echo ":\$@" >&3
-echo ">\$@: \$\$(date)"
-if ! bash -ex $snippets_dir/\$@; then \\
- echo " :\$@ failed! See build log" >&3; \\
- exit 1; \\
-fi
-echo "<\$@: \$\$(date)"
-endef
-
-EOF
-
- create_snippet() {
- # Create a script snippet
- cat >"$snippets_dir/${name?}"
-
- # Add a rule to the makefile
- cat <<EOF >>"$makefile"
-$name:${deps:+ $deps}
- @\$(run-snippet)
-
-EOF
- }
-
- # golden-setup. Additionally query for a snapshot ID corresponding to
- # this version in the stream. If a snapshot ID exists, the comparison
- # file is generated containing the snapshot ID.
- #
- # We need to make a shallow clone of the repository first in order to
- # get the reference, however. And, if later we find needing a fresh
- # snapshot, we unshallow that.
- cat <<EOF | name="golden-setup" create_snippet
-git clone --depth 1 -q $ref_arg "$golden_url" "$golden_repo"
-cd -P "$golden_repo"
-git fetch --depth 1 -q origin "$golden_ref"
-git checkout -q FETCH_HEAD
-
-if [ -z "$cov_force_commit" ]; then
- "$ci_root/script/get_latest_snapshot.py" \\
- --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --file "$golden_snapshot" \\
- --description "*$cov_checker*" \\
- --version "\$(git show -q --format=%H)" \\
- "$stream_name" 2>&3 || true
-fi
-
-{
-echo " golden: $golden_url $golden_ref"
-echo " golden: \$(git show -q --format=%H)"
-} >&3
-
-if [ -f "$golden_snapshot" ]; then
- echo " golden: snapshot ID \$(cat $golden_snapshot) exists" >&3
-else
- git fetch -q --unshallow origin
-fi
-EOF
-
-
- # Setup branch
- if upon "$local_ci"; then
- if not_upon "$need_compare"; then
- ln -s "$tf_root" "$branch_repo"
-
- # Run scanning as-is since we don't need a comparison.
- cat <<EOF | name="branch-setup" create_snippet
-if [ "$dont_clean" != 1 ]; then
- cd -P "$branch_repo"
- MAKEFLAGS= make distclean
-fi
-EOF
- else
- # Running comparison means that we need to make a merge
- # commit. It's undesirable to do that on the user's
- # working copy, so do it on a separate one.
- cat <<EOF | name="branch-setup" create_snippet
-git clone -q $ref_arg "$tf_src_repo_url" "$branch_repo"
-cd -P "$branch_repo"
-git checkout -b cov-branch origin/master
-rsync -a --exclude=".git" --exclude "**.o" --exclude "**.d" "$tf_root/" .
-git add .
-git -c user.useconfigonly=false commit --allow-empty -q -m "Test branch"
-git checkout master
-git -c user.useconfigonly=false merge --no-ff -q cov-branch
-
-git remote add golden "$golden_url"
-git fetch -q golden "$golden_ref"
-git checkout -q -b cov-golden FETCH_HEAD
-git -c user.useconfigonly=false merge --no-edit --no-ff -q cov-branch
-EOF
- fi
- else
- # Use the local checkout at $tf_root for analysing branch and
- # golden together
- ln -s "$tf_root" "$branch_repo"
-
- cat <<EOF | name="branch-setup" create_snippet
-if [ "$need_compare" ]; then
- cd -P "$branch_repo"
- if [ -f ".git/shallow" ]; then
- git fetch -q --unshallow origin
- fi
- git remote add golden "$golden_url"
- git fetch -q golden $golden_ref
- git branch cov-branch HEAD
- git checkout -q -b cov-golden FETCH_HEAD
- echo " branch: \$(git show -q --format=%H cov-branch)" >&3
- git -c user.useconfigonly=false merge --no-edit --no-ff -q cov-branch
-fi
-EOF
- fi
-
-
- # Setup stream
- cat <<EOF | name="stream-setup" create_snippet
-if cov-manage-im --mode streams --add --set "name:$stream_name" \\
- --auth-key-file "$auth_file" \\
- --host "$coverity_host" --ssl --port "$coverity_port"; then
- cov-manage-im --mode projects --name "Arm Trusted Firmware" --update \\
- --insert "stream:$stream_name" --auth-key-file "$auth_file" \\
- --host "$coverity_host" --ssl --port "$coverity_port"
-fi
-EOF
-
-
- # Coverity configuration
- cat <<EOF | name="cov-config" create_snippet
-cov-configure --comptype gcc --template --compiler "$cov_compiler" \\
- --config "$cov_config/config.xml"
-EOF
-
-
- # cov-build on golden; only performed if a comparison file doesn't
- # exist.
- cat <<EOF | name="golden-cov-build" deps="cov-config golden-setup" \
- create_snippet
-if [ ! -f "$golden_snapshot" -o -n "$cov_force_commit" ]; then
- cd -P "$golden_repo"
- MAKEFLAGS= cov-build --config "$cov_config/config.xml" \\
- --dir "$cov_dir/golden" $@
-else
- echo " golden: cov-build skipped" >&3
-fi
-EOF
-
-
- # cov-analyze on golden; only performed if a comparison file doesn't
- # exist.
- cat <<EOF | name="golden-cov-analyze" deps="golden-cov-build" \
- create_snippet
-if [ ! -f "$golden_snapshot" -o -n "$cov_force_commit" ]; then
- cd -P "$golden_repo"
- cov-analyze --dir "$cov_dir/golden" $cov_options --verbose 0 \\
- --strip-path "\$(pwd -P)" \\
- --redirect "stdout,$cov_dir/golden.txt"
-else
- echo " golden: cov-analyze skipped" >&3
-fi
-EOF
-
-
- # cov-commit-defects on golden. Since more than one job could have
- # started analyzing golden after finding the snapshot misssing, we check
- # for a snapshot again, and a commit only performed if a comparison file
- # doesn't exist.
- cat <<EOF | name="golden-cov-commit-defects" \
- deps="stream-setup golden-cov-analyze" create_snippet
-if [ ! -f "$golden_snapshot" -a -z "$cov_force_commit" ]; then
- "$ci_root/script/get_latest_snapshot.py" \\
- --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --file "$golden_snapshot" \\
- --description "*$cov_checker*" \\
- --version "\$(git show -q --format=%H)" \\
- "$stream_name" 2>&3 || true
- retried=1
-fi
-
-if [ ! -f "$golden_snapshot" -o -n "$cov_force_commit" ]; then
- cd -P "$golden_repo"
- cov-commit-defects --dir "$cov_dir/golden" --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --stream "$stream_name" --auth-key-file "$auth_file" \\
- --version "\$(git show -q --format=%H)" \\
- --description "$description" \\
- --snapshot-id-file "$golden_snapshot"
- echo " golden: new snapshot ID: \$(cat $golden_snapshot)" >&3
-elif [ "\$retried" ]; then
- {
- echo " golden: snapshot ID \$(cat $golden_snapshot) now exists"
- echo " golden: cov-commit-defects skipped"
- } >&3
-else
- echo " golden: cov-commit-defects skipped" >&3
-fi
-EOF
-
-
- # cov-build on branch
- cat <<EOF | name="branch-cov-build" deps="cov-config branch-setup" \
- create_snippet
-cd -P "$branch_repo"
-MAKEFLAGS= cov-build --config "$cov_config/config.xml" --dir "$cov_dir/branch" $@
-EOF
-
-
- # cov-analyze on branch
- cat <<EOF | name="branch-cov-analyze" deps="branch-cov-build" \
- create_snippet
-cd -P "$branch_repo"
-cov-analyze --dir "$cov_dir/branch" $cov_options --verbose 0 \\
- --strip-path "\$(pwd -P)" \\
- --redirect "stdout,$cov_dir/branch.txt"
-EOF
-
-
- # cov-commit-defects on branch
- cat <<EOF | name="branch-cov-commit-defects" \
- deps="stream-setup branch-cov-analyze" create_snippet
-if [ "$cov_force_commit" ]; then
- cd -P "$branch_repo"
- cov-commit-defects --dir "$cov_dir/branch" --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --stream "$stream_name" --description "$description" \\
- --version "\$(git show -q --format=%H%)" \\
- --auth-key-file "$auth_file" \\
- --snapshot-id-file "$branch_snapshot"
- echo " branch: new snapshot ID: \$(cat $branch_snapshot)" >&3
-else
- echo " branch: cov-commit-defects skipped" >&3
-fi
-EOF
-
-
- # cov-commit-defects on branch, but compare with golden
- cat <<EOF | name="branch-report-compare" \
- deps="golden-cov-commit-defects branch-cov-analyze" create_snippet
-cov-commit-defects --dir "$cov_dir/branch" --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --stream "$stream_name" --auth-key-file "$auth_file" \\
- --preview-report-v2 "$cov_dir/report.json" \\
- --comparison-snapshot-id "\$(cat $golden_snapshot)"
-EOF
-
-
- # cov-commit-defects on branch to report branch report
- cat <<EOF | name="branch-report-full" \
- deps="branch-cov-commit-defects stream-setup branch-cov-analyze" \
- create_snippet
-cov-commit-defects --dir "$cov_dir/branch" --host "$coverity_host" \\
- --https-port "$coverity_port" \\
- --stream "$stream_name" --auth-key-file "$auth_file" \\
- --preview-report-v2 "$cov_dir/report.json"
-EOF
-
local minus_j="-j"
if upon "$cov_serial_build"; then
minus_j=
@@ -334,19 +62,19 @@
# Call Coverity targets
echo "Coverity run type: ${cov_run_type:?}"
- if ! eval MAKEFLAGS= make -r $minus_j -f "$makefile" $cov_run_type; then
- return 1
- fi
-
- # Generate a text report
- local defects_file="$workspace/coverity_report.txt"
-
- if [ -f "$cov_dir/report.json" ]; then
- python3 "$ci_root/script/coverity_parser.py" \
- --output "$workspace/defects.json" \
- $cov_report_options \
- "$cov_dir/report.json" >"$defects_file" 2>&3 || true
- fi
+	# Drop the leading `make` from the build command line; the Makefile
+	# re-invokes make itself with -C <directory> and the remaining arguments,
+	# which are passed in via SUBMAKE
+ shift
+ MAKEFLAGS= SUBMAKE="$@" make -r $minus_j -f "$makefile" -C "$workspace" \
+		auth_file="$auth_file" \
+		golden_url="$golden_url" \
+		golden_ref="$golden_ref" \
+		tf_src_repo_url="$tf_src_repo_url" \
+		cov_compiler="$cov_compiler" \
+		minus_j="$minus_j" \
+		description="$description" \
+		ci_root="$ci_root" \
+ $cov_run_type 2>&3 || exit 1
# If there were defects, print them out to the console. For local CI,
# print them in yellow--the same color we'd use for UNSTABLE builds.
@@ -362,7 +90,7 @@
echo_w "$(tput sgr0)"
fi
echo_w
- echo_w "$(wc -l < "$defects_file") defects reported."
+ cat $defects_summary >&3
echo_w
build_unstable >&3
echo_w
diff --git a/script/tf-coverity/common-def.sh b/script/tf-coverity/common-def.sh
index 71640f2..746e9bc 100644
--- a/script/tf-coverity/common-def.sh
+++ b/script/tf-coverity/common-def.sh
@@ -5,11 +5,11 @@
# SPDX-License-Identifier: BSD-3-Clause
#
-JENKINS_URL=http://ssg-sw.cambridge.arm.com/jenkins
+JENKINS_URL=https://jenkins.oss.arm.com/
# mbed TLS source tag to checkout when building Trusted Firmware with Trusted
# Board Boot support.
-MBED_TLS_SOURCES_TAG="mbedtls-2.16.0"
+MBED_TLS_SOURCES_TAG="mbedtls-2.18.0"
-ARMCLANG_PATH=
+ARMCLANG_PATH=/arm/warehouse/Distributions/FA/ARMCompiler/6.8/25/standalone-linux-x86_64-rel/bin/armclang
CRYPTOCELL_LIB_PATH=/arm/projectscratch/ssg/trusted-fw/dummy-crypto-lib
diff --git a/script/tf-coverity/cov-2019.03-fix.patch b/script/tf-coverity/cov-2019.03-fix.patch
new file mode 100755
index 0000000..cc7a06a
--- /dev/null
+++ b/script/tf-coverity/cov-2019.03-fix.patch
@@ -0,0 +1,16 @@
+--- cov-analysis-linux64-2019.03/config/templates/gnu/compiler-compat-arm-intrin.h 2019-12-12 16:13:12.807998525 -0600
++++ cov-analysis-linux64-2019.03/config/templates/gnu/compiler-compat-arm-intrin_new.h 2019-12-12 16:14:23.559761391 -0600
+@@ -1373,11 +1373,11 @@
+ typedef signed char int8_t;
+ typedef short int int16_t;
+ typedef int int32_t;
+-typedef long int int64_t;
++typedef signed long long int64_t;
+ typedef unsigned char uint8_t;
+ typedef short unsigned int uint16_t;
+ typedef unsigned int uint32_t;
+-typedef long unsigned int uint64_t;
++typedef unsigned long long uint64_t;
+
+ __Int8x8_t __builtin_aarch64_tbl3v8qi(__builtin_aarch64_simd_oi, __Int8x8_t);
+ __Float32x2_t __builtin_aarch64_absv2sf(__Float32x2_t);
diff --git a/script/tf-coverity/coverity_tf_conf.py b/script/tf-coverity/coverity_tf_conf.py
index c4fbb4c..5be7c7d 100644
--- a/script/tf-coverity/coverity_tf_conf.py
+++ b/script/tf-coverity/coverity_tf_conf.py
@@ -15,31 +15,43 @@
# - description aims at providing the reason why the files are expected
# to be excluded.
exclude_paths = [
- ("drivers/arm/cci400/cci400.c", "deprecated driver"),
- ("drivers/arm/gic/v3/arm_gicv3_common.c", "platform to exercise GIC-500/600 powerdown not available yet"),
- ("drivers/arm/tzc400/tzc400.c", "deprecated driver"),
- ("drivers/arm/tzc/tzc_common_private.c",
- "file included, actually indirectly analyzed"),
- ("drivers/arm/tzc/tzc_dmc500.c", "not used by any upstream platform"),
-
- ("drivers/io/io_dummy.c", "not used by any upstream platform"),
- ("drivers/partition/gpt.c", "not used by any upstream platform"),
- ("drivers/partition/partition.c", "not used by any upstream platform"),
-
- ("lib/aarch64/xlat_tables.c", "deprecated library code"),
+ ("drivers/arm/tzc/tzc_common_private.c", "File included, actually indirectly analyzed"),
+ ("drivers/marvell/comphy/phy-comphy-3700.c", "File is actually analyzed. False positive"),
+ ("drivers/marvell/comphy/phy-comphy-cp110.c", "File is actually analyzed. False positive"),
+ ("drivers/marvell/gwin.c", "Not used by any upstream marvell platform"),
+ ("drivers/marvell/mochi/ap807_setup.c", "Not used by any upstream marvell platform"),
+    ("drivers/renesas/rcar/ddr/ddr_b/boot_init_dram_config.c",
+     "It is used as a header file and is included in boot_init_dram.c. Since it is not explicitly compiled, it cannot be converted into an instrumented binary for further analysis"),
+ ("drivers/auth/cryptocell/713/.*", "There is no dummy library to support 713 for now. This can be removed once we have this library in place"),
+ ("drivers/st/scmi-msg/.*", "Not used by any upstream platform"),
("plat/arm/board/fvp/fconf/fconf_nt_config_getter.c", "Not currently used. Future functionality"),
- ("plat/arm/common/arm_tzc_dmc500.c", "not used by any upstream platform"),
+ ("plat/marvell/armada/a8k/common/plat_bl1_setup.c", "Not used by any upstream marvell platform"),
+ ("plat/mediatek/common/custom/oem_svc.c", "Used only by mt6795 which is unsupported platform"),
+ ("plat/mediatek/mt6795/.*", "This platform fails to build and is not supported by mediatek"),
+ ("plat/mediatek/mt8173/plat_mt_gic.c", "Deprecated code"),
+ ("plat/nvidia/tegra/common/tegra_gicv3.c", "Not used by any upstream nvidia platform"),
+ ("plat/qemu/common/sp_min/sp_min_setup.c", "Not used in any upstream platform - see GENFW-2164"),
+ ("plat/rockchip/rk3399/drivers/m0/.*", "Work around the lack of support for the M0 compiler in the scripts"),
- ("plat/mediatek/mt8173/plat_mt_gic.c", "deprecated code"),
+    # The following block excludes files that cannot be built because of a
+    # missing file (m0_ipc.h). It should be removed as soon as the stingray
+    # platform can be built with the SCP_BL2 option.
+ ("drivers/brcm/iproc_gpio.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("drivers/brcm/scp.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("drivers/brcm/spi/iproc_qspi.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("drivers/brcm/spi/iproc_spi.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("drivers/brcm/spi_flash.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("drivers/brcm/spi_sf.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/board/common/bcm_elog_ddr.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/board/stingray/src/brcm_pm_ops.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/board/stingray/src/ncsi.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/board/stingray/src/scp_cmd.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/board/stingray/src/scp_utils.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/common/brcm_mhu.c", "Cannot be built due to the missing m0_ipc.h file"),
+ ("plat/brcm/common/brcm_scpi.c", "Cannot be built due to the missing m0_ipc.h file"),
- ("lib/aarch32/arm32_aeabi_divmod.c", "not used by any upstream platform"),
-
- # Waiting for the following patch to be available:
- # http://ssg-sw.cambridge.arm.com/gerrit/#/c/49862/
- ("plat/rockchip/rk3399/drivers/m0/.*",
- "work around the lack of support for the M0 compiler in the scripts"),
+ ("lib/compiler-rt/.*", "3rd party libraries will not be fixed"),
+ ("lib/libfdt/.*", "3rd party libraries will not be fixed"),
("tools/.*", "Host tools"),
- ("plat/qemu/sp_min/sp_min_setup.c", "not used in any upstream platform - see GENFW-2164")
]
diff --git a/script/tf-coverity/run_coverity_on_tf.py b/script/tf-coverity/run_coverity_on_tf.py
index a29b5f3..b087c61 100755
--- a/script/tf-coverity/run_coverity_on_tf.py
+++ b/script/tf-coverity/run_coverity_on_tf.py
@@ -36,7 +36,7 @@
def get_coverity_tool():
- coverity_tarball = "cov-analysis-linux64-2017.07.tar.gz"
+ coverity_tarball = "cov-analysis-linux64-2019.03.tar.gz"
url = "http://files.oss.arm.com/downloads/tf-a/" + coverity_tarball
print("Downloading Coverity Build tool from %s..." % url)
file_handle = urllib.request.urlopen(url)
@@ -55,6 +55,14 @@
cov_dir_path = os.path.abspath(os.path.join(cov_dir_name, "bin"))
print(" export PATH=%s$PATH" % (cov_dir_path + os.pathsep))
+ # Patch is needed for coverity version 2019.03
+ patch_file = os.path.abspath(os.path.join(__file__, os.pardir, "cov-2019.03-fix.patch"))
+ cov_file = os.path.abspath(os.path.join(cov_dir_name, "config",
+ "templates", "gnu", "compiler-compat-arm-intrin.h"))
+ print("Patching file")
+ print(cov_file)
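+    # Equivalent to running: patch <compiler-compat-arm-intrin.h> -i cov-2019.03-fix.patch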
+ utils.exec_prog("patch", [cov_file, "-i", patch_file],
+ out=subprocess.PIPE, out_text_mode=True)
def print_coverage(coverity_dir, tf_dir, exclude_paths=[], log_filename=None):
analyzed = []
@@ -70,8 +78,9 @@
# Get the list of files analyzed by Coverity.
#
# To do that, we examine the build log file Coverity generated and look for
- # compilation lines. These are the lines starting with "COMPILING:". We consider
- # only those lines that actually compile C files, i.e. lines of the form:
+ # compilation lines. These are the lines starting with "COMPILING:" or
+ # "EXECUTING:". We consider only those lines that actually compile C files,
+ # i.e. lines of the form:
# gcc -c file.c -o file.o
# This filters out other compilation lines like generation of dependency files
# (*.d) and such.
@@ -79,7 +88,8 @@
coverity_build_log = os.path.join(coverity_dir, "build-log.txt")
with open(coverity_build_log, encoding="utf-8") as build_log:
for line in build_log:
- results = re.search("COMPILING:.*-c *(.*\.c).*-o.*\.o", line)
+            line = re.sub('//', '/', line)
+            results = re.search(r"(?:COMPILING|EXECUTING):.*-c *(.*\.c).*-o.*\.o", line)
if results is not None:
filename = results.group(1)
if filename not in analyzed:
diff --git a/script/tf-coverity/tf-cov-make b/script/tf-coverity/tf-cov-make
index 4ab8696..d643ab8 100755
--- a/script/tf-coverity/tf-cov-make
+++ b/script/tf-coverity/tf-cov-make
@@ -76,12 +76,21 @@
clean_build $fvp_common_flags ${ARM_TBB_OPTIONS} SPD=opteed
clean_build $fvp_common_flags ${ARM_TBB_OPTIONS} SPD=tlkd
+# Dualroot chain of trust.
+clean_build $fvp_common_flags ${ARM_TBB_OPTIONS} SPD=tspd COT=dualroot
+
clean_build -j PLAT=fvp DEBUG=1 SPD=trusty
clean_build -j PLAT=fvp DEBUG=1 SPD=trusty TRUSTY_SPD_WITH_GENERIC_SERVICES=1
# SDEI
clean_build PLAT=fvp DEBUG=1 SDEI_SUPPORT=1 EL3_EXCEPTION_HANDLING=1
+# SDEI with fconf
+clean_build PLAT=fvp DEBUG=1 SDEI_IN_FCONF=1 SDEI_SUPPORT=1 EL3_EXCEPTION_HANDLING=1
+
+# Secure interrupt descriptors with fconf
+clean_build PLAT=fvp DEBUG=1 SEC_INT_DESC_IN_FCONF=1
+
# Without coherent memory
clean_build $fvp_common_flags ${ARM_TBB_OPTIONS} ARM_TSP_RAM_LOCATION=dram SPD=tspd USE_COHERENT_MEM=0
@@ -117,17 +126,54 @@
clean_build $fvp_common_flags CROSS_COMPILE=arm-none-eabi- \
ARCH=aarch32 AARCH32_SP=sp_min ARM_XLAT_TABLES_LIB_V1=1 RECLAIM_INIT_CODE=0
-# Using GIC600 driver
-clean_build $fvp_common_flags FVP_USE_GIC_DRIVER=FVP_GIC600
+# SPM support based on Management Mode Interface Specification
+clean_build $fvp_common_flags SPM_MM=1 EL3_EXCEPTION_HANDLING=1
-# SPM support
-clean_build $fvp_common_flags ENABLE_SPM=1 EL3_EXCEPTION_HANDLING=1
+# SPM support with TOS(optee) as SPM sitting at S-EL1
+clean_build $fvp_common_flags SPD=spmd SPMD_SPM_AT_SEL2=0
+
+# SPM support with Secure hafnium as SPM sitting at S-EL2
+# SP_LAYOUT_FILE is used only during FIP creation, but the build won't progress
+# if it has no value, so pass a dummy string.
+clean_build $fvp_common_flags SPD=spmd SPMD_SPM_AT_SEL2=1 ARM_ARCH_MINOR=4 \
+ CTX_INCLUDE_EL2_REGS=1 SP_LAYOUT_FILE=dummy
#BL2 at EL3 support
clean_build $fvp_common_flags BL2_AT_EL3=1
clean_build $fvp_common_flags CROSS_COMPILE=arm-none-eabi- \
ARCH=aarch32 AARCH32_SP=sp_min BL2_AT_EL3=1
+# RAS Extension Support
+clean_build $fvp_common_flags EL3_EXCEPTION_HANDLING=1 \
+ FAULT_INJECTION_SUPPORT=1 HANDLE_EA_EL3_FIRST=1 RAS_EXTENSION=1 \
+ SDEI_SUPPORT=1
+
+# Hardware Assisted Coherency(DynamIQ)
+clean_build $fvp_common_flags FVP_CLUSTER_COUNT=1 FVP_MAX_CPUS_PER_CLUSTER=8 \
+ HW_ASSISTED_COHERENCY=1 USE_COHERENT_MEM=0
+
+# Pointer Authentication Support
+clean_build $fvp_common_flags CTX_INCLUDE_PAUTH_REGS=1 \
+ ARM_ARCH_MINOR=5 EL3_EXCEPTION_HANDLING=1 BRANCH_PROTECTION=1 SDEI_SUPPORT=1 SPD=tspd TSP_NS_INTR_ASYNC_PREEMPT=1
+
+# Undefined Behaviour Sanitizer
+# Building with UBSAN (SANITIZE_UB=on) increases the executable size, so it is
+# only properly supported in BL31 with RESET_TO_BL31 enabled
+make $fvp_common_flags clean
+make $fvp_common_flags SANITIZE_UB=on RESET_TO_BL31=1 bl31
+
+# debugfs feature
+clean_build $fvp_common_flags DEBUG=1 USE_DEBUGFS=1
+
+# MPAM feature
+clean_build $fvp_common_flags ENABLE_MPAM_FOR_LOWER_ELS=1
+
+# Using GICv3.1 driver with extended PPI and SPI range
+clean_build $fvp_common_flags GIC_EXT_INTID=1
+
+# Using GICv4 features with extended PPI and SPI range
+clean_build $fvp_common_flags GIC_ENABLE_V4_EXTN=1 GIC_EXT_INTID=1
+
# Measured Boot
clean_build $fvp_common_flags ${ARM_TBB_OPTIONS} MEASURED_BOOT=1
@@ -140,22 +186,93 @@
clean_build $juno_common_flags EL3_PAYLOAD=0x80000000
clean_build $juno_common_flags ENABLE_STACK_PROTECTOR=strong
clean_build $juno_common_flags CSS_USE_SCMI_SDS_DRIVER=0
-clean_build $juno_common_flags SPD=tspd ${ARM_TBB_OPTIONS} ARM_CRYPTOCELL_INTEG=1 CCSBROM_LIB_PATH=${CRYPTOCELL_LIB_PATH}
+clean_build $juno_common_flags SPD=tspd ${ARM_TBB_OPTIONS} ARM_CRYPTOCELL_INTEG=1 CCSBROM_LIB_PATH=${CRYPTOCELL_LIB_PATH} KEY_SIZE=2048
#
# System Guidance for Infrastructure platform SGI575
+# Enable build config with RAS_EXTENSION to cover more files
+make -j DEBUG=1 PLAT=sgi575 ${ARM_TBB_OPTIONS} EL3_EXCEPTION_HANDLING=1 FAULT_INJECTION_SUPPORT=1 \
+ HANDLE_EA_EL3_FIRST=1 RAS_EXTENSION=1 SDEI_SUPPORT=1 SPM_MM=1 all
#
-make -j DEBUG=1 PLAT=sgi575 all
+# System Guidance for Mobile platform SGM775
+#
+make -j DEBUG=1 PLAT=sgm775 ${ARM_TBB_OPTIONS} SPD=tspd \
+ CSS_USE_SCMI_SDS_DRIVER=1 all
#
# System Guidance for Infrastructure platform RD-N1Edge
#
-make -j DEBUG=1 PLAT=rdn1edge all
+make -j DEBUG=1 PLAT=rdn1edge ${ARM_TBB_OPTIONS} all
#
# System Guidance for Infrastructure platform RD-E1Edge
#
-make -j DEBUG=1 PLAT=rde1edge all
+make -j DEBUG=1 PLAT=rde1edge ${ARM_TBB_OPTIONS} CSS_SGI_CHIP_COUNT=1 all
+
+#
+# System Guidance for Infrastructure platform RD-Daniel
+#
+make -j DEBUG=1 PLAT=rddaniel ${ARM_TBB_OPTIONS} all
+
+#
+# System Guidance for Infrastructure platform RD-Danielxlr
+#
+make -j DEBUG=1 PLAT=rddanielxlr ${ARM_TBB_OPTIONS} CSS_SGI_CHIP_COUNT=4 all
+
+#
+# Neoverse N1 SDP platform
+#
+make -j DEBUG=1 PLAT=n1sdp ${ARM_TBB_OPTIONS} all
+
+#
+# FVP VE platform
+#
+make -j DEBUG=1 PLAT=fvp_ve AARCH32_SP=sp_min ARCH=aarch32 \
+ CROSS_COMPILE=arm-none-eabi- ARM_ARCH_MAJOR=7 \
+ ARM_CORTEX_A5=yes ARM_XLAT_TABLES_LIB_V1=1 \
+ FVP_HW_CONFIG_DTS=fdts/fvp-ve-Cortex-A5x1.dts all
+
+#
+# A5 DesignStart Platform
+#
+make -j DEBUG=1 PLAT=a5ds AARCH32_SP=sp_min ARCH=aarch32 \
+ ARM_ARCH_MAJOR=7 ARM_CORTEX_A5=yes ARM_XLAT_TABLES_LIB_V1=1 \
+ CROSS_COMPILE=arm-none-eabi- FVP_HW_CONFIG_DTS=fdts/a5ds.dts
+
+#
+# Corstone700 Platform
+#
+
+corstone700_common_flags="CROSS_COMPILE=arm-none-eabi- \
+ PLAT=corstone700 \
+ ARCH=aarch32 \
+ RESET_TO_SP_MIN=1 \
+ AARCH32_SP=sp_min \
+ ARM_LINUX_KERNEL_AS_BL33=0 \
+ ARM_PRELOADED_DTB_BASE=0x80400000 \
+ ENABLE_PIE=1 \
+ DEBUG=1 \
+ ENABLE_STACK_PROTECTOR=all \
+ all"
+
+echo "Info: Building Corstone700 FVP ..."
+
+make TARGET_PLATFORM=fvp ${corstone700_common_flags}
+
+echo "Info: Building Corstone700 FPGA ..."
+
+make TARGET_PLATFORM=fpga ${corstone700_common_flags}
+
+#
+# Arm internal FPGA port
+#
+make PLAT=arm_fpga $external_plat_common_flags CROSS_COMPILE=aarch64-none-elf- \
+ FPGA_PRELOADED_DTB_BASE=0x88000000 PRELOADED_BL33_BASE=0x82080000 all
+
+#
+# Total Compute platform
+#
+make -j DEBUG=1 PLAT=tc0 ${ARM_TBB_OPTIONS} all
# Partners' platforms.
# Enable as many features as possible.
@@ -164,10 +281,14 @@
external_plat_common_flags="-j DEBUG=1"
make PLAT=mt8173 $external_plat_common_flags all
+make PLAT=mt8183 $external_plat_common_flags all
+make PLAT=rk3288 CROSS_COMPILE=arm-none-eabi- \
+ $external_plat_common_flags ARCH=aarch32 AARCH32_SP=sp_min all
make PLAT=rk3368 $external_plat_common_flags COREBOOT=1 all
-make PLAT=rk3399 $external_plat_common_flags COREBOOT=1 all
-make PLAT=rk3328 $external_plat_common_flags COREBOOT=1 all
+make PLAT=rk3399 $external_plat_common_flags COREBOOT=1 PLAT_RK_DP_HDCP=1 all
+make PLAT=rk3328 $external_plat_common_flags COREBOOT=1 PLAT_RK_SECURE_DDR_MINILOADER=1 all
+make PLAT=px30 $external_plat_common_flags PLAT_RK_SECURE_DDR_MINILOADER=1 all
# Although we do several consecutive builds for the Tegra platform below, we
# don't need to clean between each one because the Tegra makefiles specify
@@ -175,6 +296,7 @@
make PLAT=tegra TARGET_SOC=t210 $external_plat_common_flags all
make PLAT=tegra TARGET_SOC=t132 $external_plat_common_flags all
make PLAT=tegra TARGET_SOC=t186 $external_plat_common_flags all
+make PLAT=tegra TARGET_SOC=t194 $external_plat_common_flags all
# For the Xilinx platform, artificially increase the extents of BL31 memory
# (using the platform-specific build options ZYNQMP_ATF_MEM_{BASE,SIZE}).
@@ -185,23 +307,163 @@
ZYNQMP_ATF_MEM_BASE=0xFFFC0000 ZYNQMP_ATF_MEM_SIZE=0x00040000 \
all
+# Build both for silicon (default) and virtual QEMU platform.
+clean_build PLAT=versal $external_plat_common_flags
+clean_build PLAT=versal $external_plat_common_flags VERSAL_PLATFORM=versal_virt
+
+# Platforms from Allwinner
+make PLAT=sun50i_a64 $external_plat_common_flags all
+make PLAT=sun50i_h6 $external_plat_common_flags all
+
+# Platforms from i.MX
+make AARCH32_SP=optee ARCH=aarch32 ARM_ARCH_MAJOR=7 ARM_CORTEX_A7=yes \
+ CROSS_COMPILE=arm-none-eabi- PLAT=warp7 ${TBB_OPTIONS} \
+ $external_plat_common_flags all
+make AARCH32_SP=optee ARCH=aarch32 CROSS_COMPILE=arm-none-eabi- PLAT=picopi \
+ $external_plat_common_flags all
+make PLAT=imx8mm $external_plat_common_flags all
+make PLAT=imx8mn $external_plat_common_flags all
+make PLAT=imx8mp $external_plat_common_flags all
+
+# Temporarily building in release mode until the following ticket is resolved:
+# https://developer.trustedfirmware.org/T626
+# make PLAT=imx8mq $external_plat_common_flags all
+make PLAT=imx8mq -j all
+
+make PLAT=imx8qm $external_plat_common_flags all
+make PLAT=imx8qx $external_plat_common_flags all
+
+# Platforms from Intel
+make PLAT=stratix10 $external_plat_common_flags all
+make PLAT=agilex $external_plat_common_flags all
+
+# Platforms from Broadcom
+clean_build PLAT=stingray BOARD_CFG=bcm958742t INCLUDE_EMMC_DRIVER_ERASE_CODE=1
+clean_build PLAT=stingray BOARD_CFG=bcm958742t-ns3 INCLUDE_EMMC_DRIVER_ERASE_CODE=1
+
+# Platforms from Marvell
+make PLAT=a3700 $external_plat_common_flags SCP_BL2=/dev/null all
+# Source files from the mv-ddr-marvell repository are necessary
+# to build the Marvell platforms below
+wget http://files.oss.arm.com/downloads/tf-a/mv-ddr-marvell/mv-ddr-marvell-a881467ef0f0185e6570dd0483023fde93cbb5f5.tar.gz 2> /dev/null
+tar -xzf mv-ddr-marvell-a881467ef0f0185e6570dd0483023fde93cbb5f5.tar.gz 2> /dev/null
+mv mv-ddr-marvell drivers/marvell/mv_ddr
+
+# These Marvell platforms depend on the GCC 6.2.1 toolchain
+make PLAT=a80x0 DEBUG=1 SCP_BL2=/dev/null \
+ CROSS_COMPILE=/arm/pdsw/tools/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- all
+make PLAT=a80x0_mcbin DEBUG=1 SCP_BL2=/dev/null \
+ CROSS_COMPILE=/arm/pdsw/tools/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- all
+make PLAT=a70x0 DEBUG=1 SCP_BL2=/dev/null \
+ CROSS_COMPILE=/arm/pdsw/tools/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- all
+make PLAT=a70x0_amc DEBUG=1 SCP_BL2=/dev/null \
+ CROSS_COMPILE=/arm/pdsw/tools/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- all
+make PLAT=a80x0_puzzle DEBUG=1 SCP_BL2=/dev/null \
+ CROSS_COMPILE=/arm/pdsw/tools/gcc-linaro-6.2.1-2016.11-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu- all
+
+# Removing the source files
+rm -rf drivers/marvell/mv_ddr 2> /dev/null
+
+# Platforms from Meson
+make PLAT=gxbb $external_plat_common_flags all
+make PLAT=gxl $external_plat_common_flags all
+make PLAT=g12a $external_plat_common_flags all
+
+# Platforms from Renesas
+# Renesas R-Car D3 Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile LIFEC_DBSC_PROTECT_ENABLE=0 LSI=D3 \
+ MBEDTLS_DIR=$(pwd)/mbedtls PMIC_ROHM_BD9571=0 \
+ RCAR_AVS_SETTING_ENABLE=0 SPD=none RCAR_LOSSY_ENABLE=0 \
+ RCAR_SA0_SIZE=0 RCAR_SYSTEM_SUSPEND=0 TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car H3 Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile MBEDTLS_DIR=$(pwd)/mbedtls LSI=H3 \
+ MACHINE=ulcb PMIC_LEVEL_MODE=0 RCAR_DRAM_LPDDR4_MEMCONF=0 \
+ RCAR_DRAM_SPLIT=1 RCAR_GEN3_ULCB=1 SPD=opteed \
+ TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car H3N Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile MBEDTLS_DIR=$(pwd)/mbedtls LSI=H3N \
+ SPD=opteed TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car M3 Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile MBEDTLS_DIR=$(pwd)/mbedtls LSI=M3 \
+ MACHINE=ulcb PMIC_LEVEL_MODE=0 RCAR_DRAM_LPDDR4_MEMCONF=0 \
+ RCAR_DRAM_SPLIT=2 RCAR_GEN3_ULCB=1 SPD=opteed \
+ TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car M3N Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile MBEDTLS_DIR=$(pwd)/mbedtls LSI=M3N \
+ MACHINE=ulcb PMIC_LEVEL_MODE=0 RCAR_DRAM_LPDDR4_MEMCONF=0 \
+ RCAR_GEN3_ULCB=1 SPD=opteed TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car E3 Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ BL33=Makefile MBEDTLS_DIR=$(pwd)/mbedtls LSI=E3 \
+ RCAR_AVS_SETTING_ENABLE=0 RCAR_DRAM_DDR3L_MEMCONF=0 \
+ RCAR_SA0_SIZE=0 SPD=opteed TRUSTED_BOARD_BOOT=1
+
+# Renesas R-Car V3M Automotive SoC
+clean_build PLAT=rcar $external_plat_common_flags BL32=Makefile \
+ MBEDTLS_DIR=$(pwd)/mbedtls BL33=Makefile LSI=V3M MACHINE=eagle \
+ PMIC_ROHM_BD9571=0 RCAR_DRAM_SPLIT=0 RCAR_SYSTEM_SUSPEND=0 \
+ AVS_SETTING_ENABLE=0 SPD=none TRUSTED_BOARD_BOOT=1
+
+# Platforms from ST
+make PLAT=stm32mp1 CROSS_COMPILE=arm-none-eabi- \
+ $external_plat_common_flags ARM_ARCH_MAJOR=7 STM32MP_EMMC=1 \
+ STM32MP_RAW_NAND=1 STM32MP_SDMMC=1 STM32MP_SPI_NAND=1 STM32MP_SPI_NOR=1 \
+ ARCH=aarch32 AARCH32_SP=sp_min ENABLE_STACK_PROTECTOR=strong bl1 bl2 bl32
+
+# Platforms from TI
+make PLAT=k3 $external_plat_common_flags all
+
clean_build PLAT=qemu $external_plat_common_flags ${TBB_OPTIONS}
-clean_build PLAT=qemu $external_plat_common_flags ENABLE_STACK_PROTECTOR=strong
+# Use GICV3 driver
+clean_build PLAT=qemu $external_plat_common_flags QEMU_USE_GIC_DRIVER=QEMU_GICV3 \
+ ENABLE_STACK_PROTECTOR=strong
+# Use encrypted FIP feature.
+clean_build PLAT=qemu $external_plat_common_flags ${TBB_OPTIONS} \
+ BL32_RAM_LOCATION=tdram DECRYPTION_SUPPORT=aes_gcm ENCRYPT_BL31=1 \
+ ENCRYPT_BL32=1 FW_ENC_STATUS=0 SPD=opteed
+
+clean_build PLAT=qemu_sbsa $external_plat_common_flags
# For hikey enable PMF to include all files in the platform port
-make PLAT=hikey $external_plat_common_flags ENABLE_PMF=1 all
-make PLAT=hikey960 $external_plat_common_flags all
+make PLAT=hikey $external_plat_common_flags ${TBB_OPTIONS} ENABLE_PMF=1 all
+make PLAT=hikey960 $external_plat_common_flags ${TBB_OPTIONS} all
+make PLAT=poplar $external_plat_common_flags all
+# Platforms from Socionext
clean_build PLAT=uniphier $external_plat_common_flags ${TBB_OPTIONS} SPD=tspd
clean_build PLAT=uniphier $external_plat_common_flags FIP_GZIP=1
+clean_build PLAT=synquacer $external_plat_common_flags SPM_MM=1 \
+ EL3_EXCEPTION_HANDLING=1 PRELOADED_BL33_BASE=0x0
+
+# Support for SCP Message Interface protocol with platform specific drivers
+clean_build PLAT=synquacer $external_plat_common_flags \
+ PRELOADED_BL33_BASE=0x0 SQ_USE_SCMI_DRIVER=1
+
make PLAT=poplar $external_plat_common_flags all
-make PLAT=rpi3 $external_plat_common_flags PRELOADED_BL33_BASE=0xDEADBEEF all
+# Raspberry Pi Platforms
+make PLAT=rpi3 $external_plat_common_flags ${TBB_OPTIONS} \
+ ENABLE_STACK_PROTECTOR=strong PRELOADED_BL33_BASE=0xDEADBEEF all
+make PLAT=rpi4 $external_plat_common_flags all
# Cannot use $external_plat_common_flags for LS1043 platform, as then
# the binaries do not fit in memory.
clean_build PLAT=ls1043 SPD=opteed ENABLE_STACK_PROTECTOR=strong
clean_build PLAT=ls1043 SPD=tspd
+# A113D (AXG) platform.
+clean_build PLAT=axg $external_plat_common_flags SPD=opteed
+clean_build PLAT=axg $external_plat_common_flags AML_USE_ATOS=1
+
cd ..