Sync job files with internal CI

Sync job files with platform-ci commit:
539c151d0cd99a5e6ca6c0e6966f6d8579fe864e
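
Notable changes in this sync:

- generate_yaml.sh: select the "scp_tests_scmi" payload type for SCMI
  run configs.
- manage_artefacts.sh: preserve scan-build artefacts (.html, .js and
  .css files) alongside logs.
- run_fvp_test.sh: generate bmcov coverage and branch coverage reports
  for bmcov run configs.
- run_lava.py: new LAVA job runner that retries jobs on infrastructure
  errors; replaces submit_lava_job.sh.
- should_build_local.sh: build scan_build-* and norun-* configs
  locally.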

Signed-off-by: Zelalem <zelalem.aweke@arm.com>
Change-Id: Ida470e00da76188ce3987d1fa93ec758b5e0f23a
diff --git a/job/tf-worker/generate_yaml.sh b/job/tf-worker/generate_yaml.sh
index 37a0ae0..60ae846 100755
--- a/job/tf-worker/generate_yaml.sh
+++ b/job/tf-worker/generate_yaml.sh
@@ -9,6 +9,8 @@
 
 if echo "$RUN_CONFIG" | grep -iq 'tftf'; then
 	payload_type="tftf"
+elif echo "$RUN_CONFIG" | grep -iq 'scmi'; then
+	payload_type="scp_tests_scmi"
 else
 	payload_type="linux"
 fi
diff --git a/job/tf-worker/manage_artefacts.sh b/job/tf-worker/manage_artefacts.sh
index 865afd9..dbfc4eb 100755
--- a/job/tf-worker/manage_artefacts.sh
+++ b/job/tf-worker/manage_artefacts.sh
@@ -8,6 +8,8 @@
 set -e
 
 if [ -d artefacts ]; then
-	# Remove everything except logs
-	find artefacts -type f -not \( -name "*.log" \) -exec rm -f {} +
+	# Remove everything except logs and scan-build artefacts (.html,
+	# .js and .css files), which are useful for offline debugging of
+	# static analysis defects
+	find artefacts -type f -not \( -name "*.log" -o -name "*.html" -o -name "*.js" -o -name "*.css" \) -exec rm -f {} +
 fi
diff --git a/job/tf-worker/run_fvp_test.sh b/job/tf-worker/run_fvp_test.sh
index 2a62eab..4ec3912 100755
--- a/job/tf-worker/run_fvp_test.sh
+++ b/job/tf-worker/run_fvp_test.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright (c) 2019, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
 #
@@ -8,8 +8,14 @@
 set -e
 
 # Build
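+# Enable bmcov code coverage for bmcov run configs, excluding AArch32 ones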
+export COVERAGE_ON=$(echo "$RUN_CONFIG" | grep -v 'aarch32' | grep -qE 'bmcov' && echo 1 || echo 0)
+if [ "$COVERAGE_ON" -eq 1 ]; then
+	source "$CI_ROOT/script/build_bmcov.sh"
+fi
+
 "$CI_ROOT/script/build_package.sh"
 
 if [ "$skip_runs" ]; then
 	exit 0
 fi
@@ -17,4 +23,76 @@
 # Execute test locally for FVP configs
 if [ "$RUN_CONFIG" != "nil" ] && echo "$RUN_CONFIG" | grep -iq '^fvp'; then
 	"$CI_ROOT/script/run_package.sh"
+	if [ "$COVERAGE_ON" -eq 1 ]; then
+		ELF_FOLDER=""
+		DEBUG_FOLDER=${artefacts}/debug
+		RELEASE_FOLDER=${artefacts}/release
+		if ls "${DEBUG_FOLDER}/"*.elf &> /dev/null; then
+			export ELF_FOLDER=$DEBUG_FOLDER
+		elif ls "${RELEASE_FOLDER}/"*.elf &> /dev/null; then
+			export ELF_FOLDER=$RELEASE_FOLDER
+		else
+			# Without ELF files, no report can be produced
+			echo "ELF files not present, aborting reports..."
+			exit 0
+		fi
+		export OUTDIR=${WORKSPACE}/html
+		test_config=${TEST_CONFIG}
+		if [ -n "$CC_SCP_REFSPEC" ]; then # SCP
+			export JENKINS_SOURCES_WORKSPACE="${scp_root:-$workspace}"
+			if grep -q "fvp-linux.sgi" <<< "$test_config"; then
+				export LIST_OF_BINARIES=${LIST_OF_BINARIES:-"scp_ram scp_rom mcp_rom"}
+			elif grep -q "fvp-sgm775" <<< "$test_config"; then
+				export LIST_OF_BINARIES=${LIST_OF_BINARIES:-"scp_ram scp_rom"}
+			fi
+			export OBJDUMP="$(which 'arm-none-eabi-objdump')"
+			export READELF="$(which 'arm-none-eabi-readelf')"
+			export REPO=SCP
+		else # TF-A
+			export JENKINS_SOURCES_WORKSPACE="${tf_root:-$workspace}"
+			export LIST_OF_BINARIES=${LIST_OF_BINARIES:-"bl1 bl2 bl31"}
+			export OBJDUMP="$(which 'aarch64-none-elf-objdump')"
+			export READELF="$(which 'aarch64-none-elf-readelf')"
+			export REPO=TRUSTED_FIRMWARE
+		fi
+		echo "Toolchain: $OBJDUMP"
+
+		mkdir -p ${OUTDIR}
+		sync
+		sleep 5 # Wait for trace files to be written
+		if [ $(ls -1 ${DEBUG_FOLDER}/${trace_file_prefix}-* 2>/dev/null | wc -l) != 0 ]; then
+			export TRACE_FOLDER=${DEBUG_FOLDER}
+		elif [ $(ls -1 ${RELEASE_FOLDER}/${trace_file_prefix}-* 2>/dev/null | wc -l) != 0 ]; then
+			export TRACE_FOLDER=${RELEASE_FOLDER}
+		else
+			echo "Trace files not present, aborting reports..."
+			exit 0
+		fi
+		export REPORT_TITLE="Coverage Summary Report [Build:${BUILD_NUMBER}]"
+		# Launch the intermediate layer script
+		export CONFIG_JSON=${OUTDIR}/config_file.json
+		export OUTPUT_JSON=${OUTDIR}/output_file.json
+		export CSOURCE_FOLDER=source
+		export DEBUG_ELFS=${DEBUG_ELFS:-True}
+		prepare_json_configuration "${LIST_OF_BINARIES}" "${JENKINS_SOURCES_WORKSPACE}"
+		echo "Executing intermediate_layer.py ..."
+		python ${BMCOV_REPORT_FOLDER}/intermediate_layer.py --config-json "${CONFIG_JSON}"
+		ver_py=$(python -V 2>&1 | sed 's/.* \([0-9]\).\([0-9]\).*/\1\2/')
+		if [ "$ver_py" = "27" ]; then
+			python ${BMCOV_REPORT_FOLDER}/gen-coverage-report.py --config ${BMCOV_REPORT_FOLDER}/config_atf.py \
+			--prefix_workspace "$JENKINS_SOURCES_WORKSPACE"
+		else
+			echo "Python 2.7 is required for producing Bmcov reports"
+		fi
+		chmod 775 ${BMCOV_REPORT_FOLDER}/branch_coverage/branch_coverage.sh
+		echo "Running branch coverage..."
+		branch_folder=${OUTDIR}/lcov_report
+		mkdir -p ${branch_folder}
+		pushd ${BMCOV_REPORT_FOLDER}/branch_coverage
+		. branch_coverage.sh --workspace ${JENKINS_SOURCES_WORKSPACE} --json-path ${OUTPUT_JSON} --outdir ${branch_folder}
+		popd
+		export OUTDIR=${WORKSPACE}/html
+		# prepare static (Jenkins) and dynamic (python server) pages
+		prepare_html_pages
+	fi
 fi
diff --git a/job/tf-worker/run_lava.py b/job/tf-worker/run_lava.py
new file mode 100644
index 0000000..93d522c
--- /dev/null
+++ b/job/tf-worker/run_lava.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
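+
+"""Run a LAVA job through a wrapper script, retrying on infrastructure errors.
+
+Usage: run_lava.py [script] [job] [retries] [--save DIR] [-v]
+"""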
+
+import argparse
+import os
+import subprocess
+import sys
+import logging
+import tempfile
+import yaml
+
+
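+# A LAVA results file is a YAML list of test cases. A case counts as an infra
+# error when its metadata carries error_type "Infrastructure" or an error_msg
+# containing "timed out"; roughly (illustrative, trimmed):
+#   - id: "42"
+#     metadata:
+#       error_type: Infrastructure
+#       error_msg: "bootloader-commands timed out"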
+def case_infra_error(case):
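+    """Return True unless this test case hit an infrastructure error."""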
+    try:
+        if case["metadata"]["error_type"] == "Infrastructure":
+            logging.error("case %s: infra error of type Infrastructure", case["id"])
+            return False
+        elif "timed out" in case["metadata"]["error_msg"]:
+            logging.error(
+                "case %s: infra error: %s", case["id"], case["metadata"]["error_msg"]
+            )
+            return False
+        else:
+            return True
+    except KeyError:
+        return True
+
+
+def not_infra_error(path):
+    """Returns a boolean indicating if there was not an infra error"""
+    try:
+        with open(path) as file:
+            results = yaml.safe_load(file)
+        return all(case_infra_error(tc) for tc in results or [])
+    except FileNotFoundError:
+        logging.warning("Could not open results file %s", path)
+        return True
+
+
+def run_one_job(cmd):
+    """Run a job and return a boolean indicating if there was not an infra error.
+    Raises a `subprocess.CalledProcessError` when the called script fails.
+    """
+    subprocess.run(cmd, check=True)
+    return not_infra_error("job_results.yaml")
+
+
+def retry_job(cmd, retries):
+    """Run a job until there was not an infra error or retries are exhausted.
+    Raises a `subprocess.CalledProcessError` when the called script fails.
+    """
+    logging.debug("trying job %s up to %d times", str(cmd), retries)
+    return any(run_one_job(cmd) for _ in range(retries))
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="LAVA job runner with infrastructure error detection and retry."
+    )
+    parser.add_argument(
+        "script",
+        nargs="?",
+        default=os.path.join(os.path.dirname(__file__), "run_lava_job.sh"),
+        help="bash job script to run a LAVA job",
+    )
+    parser.add_argument(
+        "job",
+        nargs="?",
+        default=os.path.join("artefacts", os.environ["BIN_MODE"], "juno.yaml"),
+        help="the Lava job description file",
+    )
+    parser.add_argument(
+        "retries",
+        type=int,
+        nargs="?",
+        default=3,
+        help="number of retries, defaults to 3",
+    )
+    parser.add_argument(
+        "--save",
+        default=tempfile.mkdtemp(prefix="job-output"),
+        help="directory to store the job_output.log",
+    )
+    parser.add_argument(
+        "-v", action="count", default=0, help="increase verbosity of debug output"
+    )
+    args = parser.parse_args()
+    if args.v >= 2:
+        logging.getLogger().setLevel(logging.DEBUG)
+    elif args.v >= 1:
+        logging.getLogger().setLevel(logging.INFO)
+    logging.debug(args)
+    try:
+        if not retry_job([args.script, args.job, args.save], args.retries):
+            logging.critical("All jobs failed with infra errors; retries exhausted")
+            sys.exit(-1)
+        else:
+            sys.exit(0)
+    except subprocess.CalledProcessError as e:
+        logging.critical("Job script returned error code %d", e.returncode)
+        sys.exit(e.returncode)
diff --git a/job/tf-worker/should_build_local.sh b/job/tf-worker/should_build_local.sh
index 5b47866..b3fde29 100755
--- a/job/tf-worker/should_build_local.sh
+++ b/job/tf-worker/should_build_local.sh
@@ -6,7 +6,6 @@
 #
 
 set -e
-
 # If it's a Juno build-only config, or an FVP config, we do everything locally
 if [ "$RUN_CONFIG" = "nil" ]; then
 	exit 0
@@ -17,6 +16,10 @@
 		exit 0;;
 	coverity-*)
 		exit 0;;
+	scan_build-*)
+		exit 0;;
+	norun-*)
+		exit 0;;
 esac
 
 # If we're not going to run Juno, then no need to spawn tf-build-for lava;
diff --git a/job/tf-worker/submit_lava_job.sh b/job/tf-worker/submit_lava_job.sh
deleted file mode 100755
index 7b47e97..0000000
--- a/job/tf-worker/submit_lava_job.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-
-# Submit jobs to LAVA and wait until the job is complete. This script replace
-# the "managed script" previously used and provide the same behavior.
-#
-# Required arguments:
-# 1: yaml job file
-# 2: flag whether to save output, true/false, defaults to false
-#
-# output:
-# job_results.yaml
-# job_output.log if save output = true
-
-set -e
-
-JOB_FILE="$1"
-SAVE_OUTPUT="$2"
-
-LAVA_HOST=
-LAVA_USER=
-LAVA_TOKEN=
-LAVA_URL=
-
-if [ ! -f "${JOB_FILE}" ]; then
-	echo "error: LAVA job file does not exist: ${JOB_FILE}"
-	exit 1
-fi
-
-# Install lavacli with fixes
-virtualenv -p $(which python3) venv
-source venv/bin/activate
-pip install -q lavacli
-
-# Configure lavacli
-lavacli identities add \
---username $LAVA_USER \
---token $LAVA_TOKEN \
---uri ${LAVA_URL}/RPC2 \
-default
-
-# Submit a job using lavacli
-JOB_ID=$(lavacli jobs submit ${JOB_FILE})
-if [ -z "$JOB_ID" ] ; then
-	echo "Couldn't submit. Stopping."
-	exit 1
-fi
-
-echo "Job url: https://lava.oss.arm.com/scheduler/job/$JOB_ID"
-
-# Wait for the job to finish
-lavacli jobs wait $JOB_ID
-
-if [ "${SAVE_OUTPUT}" = "true" ] ; then
-	lavacli jobs logs $JOB_ID > job_output.log
-fi
-
-# Get results
-lavacli results $JOB_ID --yaml > job_results.yaml
-
-# Exit virtualenv
-deactivate