Sync scripts in 'script' directory with internal CI
Sync scripts with platform-ci commit:
539c151d0cd99a5e6ca6c0e6966f6d8579fe864e
Signed-off-by: Zelalem <zelalem.aweke@arm.com>
Change-Id: I455770dea2e3974f652de317b21e53cfc0b9199e
diff --git a/script/backup_external_repo.sh b/script/backup_external_repo.sh
new file mode 100755
index 0000000..78b36df
--- /dev/null
+++ b/script/backup_external_repo.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Make a backup of the following repositories on Github:
+# - arm-trusted-firmware-private.git
+# - arm-trusted-firmware-private.wiki.git
+# - tf-issues.git
+#
+# Also backup the following repositories from review.trustedfirmware.org:
+# - trusted-firmware-a.git
+# - tf-a-tests.git
+
+set -e
+
+ci_root="$(readlink -f "$(dirname "$0")/..")"
+source "$ci_root/utils.sh"
+
+backup_dir="${BACKUP_DIR:-/arm/ref/pd/pdsw/external-repo-backup}"
+
+
+initial_clone() {
+ local repo_url="${1:?}"
+ local repo_dir="${2:?}"
+ local repo_name="$(basename $repo_dir)"
+ local s_before s_after s_diff
+
+ s_before="$(date +%s)"
+
+ echo
+ echo "Cloning repository $repo_name..."
+
+ git clone --quiet --mirror "$repo_url" "$repo_dir"
+
+ pushd "$repo_dir"
+ git show --quiet | sed 's/^/ > /g'
+ popd
+
+ s_after="$(date +%s)"
+ let "s_diff = $s_after - $s_before" || true
+ echo "Cloned in $s_diff seconds."
+ echo
+}
+
+update_repo() {
+ local repo_dir="${1:?}"
+ local repo_name="$(basename $repo_dir)"
+ local s_before s_after s_diff
+
+ pushd "$repo_dir"
+
+ s_before="$(date +%s)"
+
+ echo
+ echo "Updating repo $repo_name..."
+
+ git gc --quiet
+ git remote update --prune
+ git show --quiet | sed 's/^/ > /g'
+
+ s_after="$(date +%s)"
+ let "s_diff = $s_after - $s_before" || true
+ echo "Updated in $s_diff seconds."
+ echo
+
+ popd
+}
+
+get_repo_url() {
+ local url_var="${1:?}"
+ local repo_location="${2:?}"
+ local repo_name="${3:?}"
+
+ case "$repo_location" in
+ "github")
+ if upon "$anonymous"; then
+ eval $url_var="https://github.com/ARM-software/$repo_name"
+ else
+ GITHUB_USER="${GITHUB_USER:-arm-tf-bot}"
+ GITHUB_PASSWORD="${GITHUB_PASSWORD:?}"
+ eval $url_var="https://$GITHUB_USER:$GITHUB_PASSWORD@github.com/ARM-software/$repo_name"
+ fi
+ ;;
+
+ "tf.org")
+ if not_upon "$anonymous"; then
+ echo "Authenticated access to repo $repo_name not supported."
+ exit 1
+ fi
+ eval $url_var="https://review.trustedfirmware.org/TF-A/$repo_name"
+ ;;
+
+ *)
+ echo "Unsupported repository location: $repo_location."
+ exit 1
+ ;;
+ esac
+}
+
+backup_repo() {
+ local repo_location="${1:?}"
+ local repo_name="${2:?}"
+ local repo_dir="${3:-$repo_location/$repo_name}"
+
+ if [ ! -d "$repo_dir" ]; then
+ local repo_url
+ get_repo_url "repo_url" "$repo_location" "$repo_name"
+ initial_clone "$repo_url" "$repo_dir"
+ else
+ update_repo "${repo_dir:?}"
+ fi
+}
+
+
+cd "$backup_dir"
+
+# Private repositories. Need arm-tf-bot credentials for authentication.
+anonymous=0 backup_repo "github" "arm-trusted-firmware-private.git"
+anonymous=0 backup_repo "github" "arm-trusted-firmware-private.wiki.git"
+
+# Public repositories. Anonymous access is allowed.
+anonymous=1 backup_repo "github" "tf-issues.git"
+
+anonymous=1 backup_repo "tf.org" "trusted-firmware-a.git"
+anonymous=1 backup_repo "tf.org" "tf-a-tests.git"
diff --git a/script/build_bmcov.sh b/script/build_bmcov.sh
new file mode 100755
index 0000000..3923ab8
--- /dev/null
+++ b/script/build_bmcov.sh
@@ -0,0 +1,151 @@
+#!/bin/bash
+#
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+source "$CI_ROOT/utils.sh"
+
+prepare_json_configuration() {
+ set +e
+ elf_files="${1:-$LIST_OF_BINARIES}"
+ jenkins_sources="${2:-$JENKINS_SOURCES_WORKSPACE}"
+ elf_array=($elf_files)
+ elf=""
+ for index in "${!elf_array[@]}"
+ do
+ if [ "${DEBUG_ELFS}" = "True" ]; then
+ cp "${ELF_FOLDER}/${elf_array[$index]}.elf" ${OUTDIR}/.
+ fi
+ read -r -d '' elf << EOM
+${elf}
+ {
+ "name": "${ELF_FOLDER}/${elf_array[$index]}.elf",
+ "traces": [
+ "${TRACE_FOLDER}/${trace_file_prefix:-covtrace}-*.log"
+ ]
+ }
+EOM
+ if [ $index -lt $((${#elf_array[@]} - 1)) ];then
+ elf="${elf},"
+ fi
+ done
+ if [ "$REPO" = "SCP" ]; then
+ read -r -d '' sources << EOM
+ [
+ {
+ "type": "git",
+ "URL": "$CC_SCP_URL",
+ "COMMIT": "$CC_SCP_COMMIT",
+ "REFSPEC": "$CC_SCP_REFSPEC",
+ "LOCATION": "scp"
+ },
+ {
+ "type": "git",
+ "URL": "$CC_CMSIS_URL",
+ "COMMIT": "$CC_CMSIS_COMMIT",
+ "REFSPEC": "$CC_CMSIS_REFSPEC",
+ "LOCATION": "scp/cmsis"
+ }
+ ]
+EOM
+ elif [ "$REPO" = "TRUSTED_FIRMWARE" ]; then
+ read -r -d '' sources << EOM
+ [
+ {
+ "type": "git",
+ "URL": "$CC_TRUSTED_FIRMWARE_URL",
+ "COMMIT": "$CC_TRUSTED_FIRMWARE_COMMIT",
+ "REFSPEC": "$CC_TRUSTED_FIRMWARE_REFSPEC",
+ "LOCATION": "trusted_firmware"
+ },
+ {
+ "type": "http",
+ "URL": "$mbedtls_archive",
+ "COMPRESSION": "xz",
+ "LOCATION": "mbedtls"
+ }
+ ]
+EOM
+ else
+ sources=""
+ fi
+metadata="\"BUILD_CONFIG\": \"${BUILD_CONFIG}\", \"RUN_CONFIG\": \"${RUN_CONFIG}\""
+cat <<EOF > "${CONFIG_JSON}"
+{
+ "configuration":
+ {
+ "remove_workspace": true,
+ "include_assembly": true
+ },
+ "parameters":
+ {
+ "sources": $sources,
+ "workspace": "${jenkins_sources}",
+ "output_file": "${OUTPUT_JSON}",
+ "metadata": {$metadata}
+ },
+ "elfs": [
+ ${elf}
+ ]
+}
+EOF
+set -e
+}
+
+prepare_html_pages() {
+ pushd ${OUTDIR}
+ cp ${BMCOV_REPORT_FOLDER}/reporter_cc.py ${OUTDIR}/.
+ if [ "${DEBUG_ELFS}" = "True" ]; then
+ cp "${TRACE_FOLDER}/${trace_file_prefix}"* ${OUTDIR}/.
+ fi
+    # to be run by the user locally
+ cat <<EOF > "server.sh"
+#!/bin/bash
+
+echo "Running server..."
+type -a firefox || (echo "Please install Firefox..." && exit 1)
+type -a python3 || (echo "Please install python3..." && exit 1)
+
+python - << EOT
+import os
+import reporter_cc
+
+output_file = os.getenv('OUTPUT_JSON', 'output_file.json')
+source_folder = os.getenv('CSOURCE_FOLDER', 'source')
+r = reporter_cc.ReportCC(output_file)
+r.clone_repo(source_folder)
+EOT
+(sleep 2; firefox --new-window http://localhost:8081) &
+python3 -m http.server 8081
+EOF
+ chmod 777 server.sh
+ zip -r server_side.zip *
+ popd
+}
+
+PVLIB_HOME=${PVLIB_HOME:-$warehouse/SysGen/PVModelLib/$model_version/$model_build/external}
+echo "Building Bmcov for code coverage..."
+source "$CI_ROOT/script/test_definitions.sh"
+export BMCOV_FOLDER="${BMCOV_FOLDER:-$workspace/test-definitions/scripts/tools/code_coverage/fastmodel_baremetal/bmcov}"
+pushd "${workspace}"
+git clone "${TEST_DEFINITIONS_REPO}" -b "${TEST_DEFINITIONS_REFSPEC}"
+popd
+pushd "${BMCOV_FOLDER}"
+export MODEL_PLUGIN_FOLDER="${BMCOV_FOLDER}"/model-plugin
+if [ -n "$(find "$warehouse" -maxdepth 0 -type d -empty 2>/dev/null)" ]; then
+ echo "$warehouse not mounted. Falling back to pre-built plugins.."
+ folder="http://files.oss.arm.com/downloads/tf-a/coverage-plugin"
+ wget -q ${folder}/{CoverageTrace.so,CoverageTrace.o,PluginUtils.o} \
+ -P "${MODEL_PLUGIN_FOLDER}"
+ else
+ make -C model-plugin PVLIB_HOME="$PVLIB_HOME"
+fi
+
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$MODEL_PLUGIN_FOLDER
+export trace_file_prefix=covtrace
+export BMCOV_REPORT_FOLDER="${BMCOV_FOLDER}"/report
+export coverage_trace_plugin="${MODEL_PLUGIN_FOLDER}"/CoverageTrace.so
+popd
diff --git a/script/build_package.sh b/script/build_package.sh
index 33d8258..6ed52b0 100755
--- a/script/build_package.sh
+++ b/script/build_package.sh
@@ -320,6 +320,7 @@
cert_args+=" --rot-key $rot_key"
local dyn_config_opts=(
+ "fw-config"
"hw-config"
"tb-fw-config"
"nt-fw-config"
@@ -442,8 +443,6 @@
url="$mbedtls_archive" saveas="$mbedtls_ar" fetch_file
mkdir "$mbedtls_dir"
extract_tarball $mbedtls_ar $mbedtls_dir
- mbedtls_dir="$mbedtls_dir/$mbedtls_repo_name"
-
fi
emit_env "MBEDTLS_DIR" "$mbedtls_dir"
@@ -789,6 +788,14 @@
set_run_env "model_path" "${1:?}"
}
+set_model_env() {
+ local var="${1:?}"
+ local val="${2?}"
+ local run_root="${archive:?}/run"
+
+ mkdir -p "$run_root"
+ echo "export $var=$val" >> "$run_root/model_env"
+}
set_run_env() {
local var="${1:?}"
local val="${2?}"
diff --git a/script/clone_repos.sh b/script/clone_repos.sh
index f552aa9..8c7f33c 100755
--- a/script/clone_repos.sh
+++ b/script/clone_repos.sh
@@ -439,9 +439,14 @@
if [ -d "$cmsis_ref_repo" ]; then
cmsis_reference="--reference $cmsis_ref_repo"
fi
-
git submodule -q update $cmsis_reference --init
-
+ # Workaround while fixing permissions on /arm/projectscratch/ssg/trusted-fw/ref-repos/cmsis
+ cd cmsis
+ code_cov_emit_param "CMSIS" "URL" "$(git remote -v | grep fetch | awk '{print $2}')"
+ code_cov_emit_param "CMSIS" "COMMIT" "$(git rev-parse HEAD)"
+ code_cov_emit_param "CMSIS" "REFSPEC" "master"
+ cd ..
+ ########################################
popd
fi
diff --git a/script/download_linaro_release.sh b/script/download_linaro_release.sh
index 6f43307..1829d6e 100755
--- a/script/download_linaro_release.sh
+++ b/script/download_linaro_release.sh
@@ -12,14 +12,28 @@
set -e
# Download all ZIP files from the chosen Linaro release
-time wget -q -c -m -A .zip -np -nd "https://releases.linaro.org/members/arm/platforms/${1:?}/"
+base="http://releases.linaro.org/members/arm/platforms/${1:?}"
+
+wget -q "$base/MD5SUMS"
+
+for file in $(awk '{print $2}' < MD5SUMS); do
+ wget "$base/$file"
+done
+
+# Check files didn't get corrupted in the transfer
+md5sum -c MD5SUMS
# Uncompress each ZIP file in its own directory (named after the ZIP file)
for zipfile in $(echo *.zip); do
echo
echo "Uncompressing file $zipfile"
- unzip -d "${zipfile%.zip}" "$zipfile"
+ directory_name="${zipfile%.zip}"
+ mkdir "$directory_name"
+
+ cd "$directory_name"
+ unzip "../$zipfile"
+ cd -
done
-rm -f *.zip
+rm -rf *.zip *.xz *.gz
diff --git a/script/gen_juno_linux_reboot_yaml.sh b/script/gen_juno_linux_reboot_yaml.sh
new file mode 100755
index 0000000..f1254eb
--- /dev/null
+++ b/script/gen_juno_linux_reboot_yaml.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Generate a YAML file in order to dispatch Juno runs on LAVA.
+# This file will boot Linux, reboot Linux, and then wait for the shell prompt
+# to declare the test as a pass after the successful reboot. Note that this
+# script would produce a meaningful output when run via Jenkins
+#
+# $bin_mode must be set. This script outputs to STDOUT
+
+ci_root="$(readlink -f "$(dirname "$0")/..")"
+source "$ci_root/utils.sh"
+source "$ci_root/juno_utils.sh"
+
+get_recovery_image_url() {
+ local build_job="tf-build"
+ local bin_mode="${bin_mode:?}"
+
+ if upon "$jenkins_run"; then
+ echo "$jenkins_url/job/$JOB_NAME/$BUILD_NUMBER/artifact/artefacts/$bin_mode/juno_recovery.zip"
+ else
+ echo "file://$workspace/artefacts/$bin_mode/juno_recovery.zip"
+ fi
+}
+
+bootloader_prompt="${bootloader_prompt:-juno#}"
+juno_revision="${juno_revision:-juno-r0}"
+recovery_img_url="${recovery_img_url:-$(get_recovery_image_url)}"
+nfs_rootfs="${nfs_rootfs:-$juno_rootfs_url}"
+linux_prompt="${linux_prompt:-root@(.*):~#}"
+os="${os:-debian}"
+
+cat <<EOF
+device_type: juno
+job_name: tf-juno
+
+context:
+ bootloader_prompt: $bootloader_prompt
+
+tags:
+- $juno_revision
+
+timeouts:
+ # Global timeout value for the whole job.
+ job:
+ minutes: 30
+ # Unless explicitly overridden, no single action should take more than
+ # 10 minutes to complete.
+ action:
+ minutes: 10
+
+priority: medium
+visibility: public
+
+actions:
+
+- deploy:
+ namespace: recovery
+ to: vemsd
+ recovery_image:
+ url: $recovery_img_url
+ compression: zip
+
+- deploy:
+ namespace: target
+ to: nfs
+ os: $os
+ nfsrootfs:
+ url: $nfs_rootfs
+ compression: gz
+
+- boot:
+ # Drastically increase the timeout for the boot action because of the udev
+ # issues when using TF build config "juno-all-cpu-reset-ops".
+ # TODO: Should increase the timeout only for this TF build config, not all!
+ timeout:
+ minutes: 15
+ namespace: target
+ connection-namespace: recovery
+ method: u-boot
+ commands: norflash
+ auto-login:
+ login_prompt: 'login:'
+ username: root
+ prompts:
+ - $linux_prompt
+
+- test:
+ namespace: target
+ timeout:
+ minutes: 10
+ definitions:
+ - repository:
+ metadata:
+ format: Lava-Test Test Definition 1.0
+ name: container-test-run
+ description: '"Prepare system..."'
+ os:
+ - $os
+ scope:
+ - functional
+ run:
+ steps:
+ - echo "Rebooting..."
+ from: inline
+ name: target-configure
+ path: inline/target-configure.yaml
+
+- boot:
+ timeout:
+ minutes: 15
+ namespace: target
+ connection-namespace: recovery
+ method: u-boot
+ commands: norflash
+ auto-login:
+ login_prompt: 'login:'
+ username: root
+ prompts:
+ - $linux_prompt
+EOF
diff --git a/script/gen_juno_scp_tests_scmi_yaml.sh b/script/gen_juno_scp_tests_scmi_yaml.sh
new file mode 100755
index 0000000..2422ef5
--- /dev/null
+++ b/script/gen_juno_scp_tests_scmi_yaml.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+#
+# Copyright (c) 2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Generate a YAML file in order to dispatch Juno runs on LAVA. Note that this
+# script would produce a meaningful output when run via Jenkins
+#
+# This is used exclusively to run a SCMI conformance test for SCP-Firmware on
+# Juno.
+
+ci_root="$(readlink -f "$(dirname "$0")/..")"
+source "$ci_root/utils.sh"
+source "$ci_root/juno_utils.sh"
+
+get_recovery_image_url() {
+ local build_job="tf-build"
+ local bin_mode="debug"
+
+ if upon "$jenkins_run"; then
+ echo "$jenkins_url/job/$JOB_NAME/$BUILD_NUMBER/artifact/artefacts/$bin_mode/juno_recovery.zip"
+ else
+ echo "file://$workspace/artefacts/$bin_mode/juno_recovery.zip"
+ fi
+}
+
+juno_revision="${juno_revision:-juno-r0}"
+recovery_img_url="${recovery_img_url:-$(get_recovery_image_url)}"
+
+cat <<EOF
+device_type: juno
+job_name: scp-tests-scmi-juno
+
+tags:
+- $juno_revision
+
+timeouts:
+ # Global timeout value for the whole job.
+ job:
+ minutes: 10
+ actions:
+ lava-test-monitor:
+ seconds: 180
+ connections:
+ lava-test-monitor:
+ seconds: 180
+
+priority: medium
+visibility: public
+
+actions:
+
+- deploy:
+ timeout:
+ minutes: 5
+ to: vemsd
+ recovery_image:
+ url: $recovery_img_url
+ compression: zip
+
+- boot:
+ method: minimal
+
+- test:
+ timeout:
+ minutes: 8
+
+ monitors:
+ #
+ # Monitor no.1
+ # Monitor the results from all the protocols but sensor
+ #
+ - name: SCP-SCMI-NON-SENSOR-PROTOCOL
+ start: 'BL31: Baremetal test suite: scmi'
+ end: 'Protocol Sensor'
+
+ pattern: "\\\[(base|power|system_power|performance)\\\](-|_){(?P<test_case_id>\\\D*)(.*)}(-|_)(query|power|system_power|performance|precondition)_(.*)-01: (?P<result>(CONFORMANT|NON CONFORMANT))"
+
+ fixupdict:
+ "CONFORMANT": pass
+ "NON CONFORMANT": fail
+
+ #
+ # Monitor no.2
+ # Monitor the results from the sensor protocols but for reading_get
+ #
+ - name: SCP-SCMI-SENSOR-PROTOCOL
+ start: 'SENSOR_DISCOVERY:'
+ end: 'query_sensor_description_get_non_existant_sensorid'
+
+ pattern: "\\\[(sensor)\\\](-|_){(?P<test_case_id>\\\D*)(.*)}(-|_)(query|sensor)_(.*)-01: (?P<result>(CONFORMANT|NON CONFORMANT))"
+
+ fixupdict:
+ "CONFORMANT": pass
+ "NON CONFORMANT": fail
+
+ #
+ # Monitor no.3
+ # Monitor the results from each individual sensor when performing the reading_get
+ # This special case is required since the baremetal application does not have
+ # any knowledge of the power state of the system. This results in a blind
+ # call to all the available sensors exposed by the platform, including ones
+ # tied to specific power domains that are in off state. The driver is then
+ # refusing to provide a reading for those sensors, causing a known fail for
+ # the test.
+ # The parser will therefore discard false failures.
+ #
+ - name: SCP-SCMI-SENSOR-PROTOCOL-GET
+ start: 'SENSOR_READING_GET:'
+ end: 'SCMI TEST: END'
+
+ pattern: "SENSOR ID (?P<test_case_id>\\\d+)[\\\n|\\\r](.*)MESSAGE_ID = 0x06[\\\n|\\\r](.*)PARAMETERS (.*)[\\\n|\\\r](.*)CHECK HEADER:(.*)[\\\n|\\\r](.*)CHECK STATUS: (?P<result>(PASSED|FAILED))"
+
+ fixupdict:
+ "PASSED": pass
+ "FAILED": fail
+
+
+
+ #
+ # We have already tested with one agent above and the expectations are the
+ # same for the two agents.
+ # Collect the final results.
+ #
+ - name: SCP-SCMI
+ start: 'Test Suite: SCMI 140'
+ end: 'End of Test Suite: SCMI'
+
+ pattern: "\\\[UT\\\] Test Case: (?P<test_case_id>\\\D*)(.*) Result: (?P<result>[0-9])"
+
+ fixupdict:
+ "0": pass
+ "1": fail
+ "2": fail
+ "3": fail
+ "4": fail
+ "5": fail
+ "6": fail
+ "7": fail
+ "8": fail
+ "9": fail
+
+
+
+EOF
diff --git a/script/gen_merge_report.sh b/script/gen_merge_report.sh
new file mode 100644
index 0000000..431b37a
--- /dev/null
+++ b/script/gen_merge_report.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+#
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+source $CI_ROOT/utils.sh
+REPORT_JSON=$1
+REPORT_HTML=$2
+TEST_DEF_FOLDER="${WORKSPACE}/test-definitions"
+INFO_PATH='artifact/html/lcov_report/coverage.info'
+JSON_PATH='artifact/html/output_file.json'
+BRANCH_FOLDER="scripts/tools/code_coverage/fastmodel_baremetal/bmcov/report/branch_coverage"
+BMCOV_REPORT_FOLDER="$OUTDIR/$TEST_DEF_FOLDER/scripts/tools/code_coverage/fastmodel_baremetal/bmcov/report"
+
+#################################################################
+# Create json file for input to the merge.sh for Code Coverage
+# Globals:
+# REPORT_JSON: Json file for SCP and TF ci gateway test results
+# MERGE_JSON: Json file to be used as input to the merge.sh
+# Arguments:
+# None
+# Outputs:
+# Print number of files to be merged
+################################################################
+create_merge_cfg() {
+python3 - << EOF
+import json
+import os
+
+server = os.getenv("JENKINS_URL", "https://jenkins.oss.arm.com/")
+merge_json = {} # json object
+_files = []
+with open("$REPORT_JSON") as json_file:
+ data = json.load(json_file)
+merge_number = 0
+test_results = data['test_results']
+test_files = data['test_files']
+for index, build_number in enumerate(test_results):
+ if "bmcov" in test_files[index] and test_results[build_number] == "SUCCESS":
+ merge_number += 1
+ base_url = "{}job/{}/{}/artifact/html".format(
+ server, data['job'], build_number)
+ _files.append( {'id': build_number,
+ 'config': {
+ 'type': 'http',
+ 'origin': "{}/output_file.json".format(
+ base_url)
+ },
+ 'info': {
+ 'type': 'http',
+ 'origin': "{}/lcov_report/coverage.info".format(
+ base_url)
+ }
+ })
+merge_json = { 'files' : _files }
+with open("$MERGE_JSON", 'w') as outfile:
+ json.dump(merge_json, outfile)
+print(merge_number)
+EOF
+}
+
+generate_bmcov_header() {
+ cov_html=$1
+ out_report=$2
+python3 - << EOF
+import re
+cov_html="$cov_html"
+out_report = "$out_report"
+with open(cov_html, "r") as f:
+ html_content = f.read()
+items = ["Lines", "Functions", "Branches"]
+s = """
+ <div id="div-cov">
+ <hr>
+ <table id="table-cov">
+ <tbody>
+ <tr>
+ <td>Type</td>
+ <td>Hit</td>
+ <td>Total</td>
+ <td>Coverage</td>
+ </tr>
+"""
+for item in items:
+ data = re.findall(r'<td class="headerItem">{}:</td>\n\s+<td class="headerCovTableEntry">(.+?)</td>\n\s+<td class="headerCovTableEntry">(.+?)</td>\n\s+'.format(item),
+ html_content, re.DOTALL)
+ if data is None:
+ continue
+ hit, total = data[0]
+ cov = round(float(hit)/float(total) * 100.0, 2)
+ color = "success"
+ if cov < 90:
+ color = "unstable"
+ if cov < 75:
+ color = "failure"
+ s = s + """
+ <tr>
+ <td>{}</td>
+ <td>{}</td>
+ <td>{}</td>
+ <td class='{}'>{} %</td>
+ </tr>
+""".format(item, hit, total, color, cov)
+s = s + """
+ </tbody>
+ </table>
+ <p>
+ <button onclick="window.open('artifact/$index/index.html','_blank');">Coverage Report</button>
+ </p>
+ </div>
+<script>
+ document.getElementById('tf-report-main').appendChild(document.getElementById("div-cov"));
+</script>
+"""
+with open(out_report, "a") as f:
+ f.write(s)
+EOF
+}
+OUTDIR=""
+index=""
+case "$TEST_GROUPS" in
+ scp*)
+ project="scp"
+ OUTDIR=${WORKSPACE}/reports
+ index=reports;;
+ tf*)
+ project="trusted_firmware"
+ OUTDIR=${WORKSPACE}/merge/outdir
+ index=merge/outdir;;
+ *)
+ exit 0;;
+esac
+export MERGE_JSON="$OUTDIR/merge.json"
+echo "Merging $merge_files coverage files..."
+source "$CI_ROOT/script/test_definitions.sh"
+mkdir -p $OUTDIR
+pushd $OUTDIR
+ merge_files=$(create_merge_cfg)
+ # Only merge when more than 1 test result
+ if [ "$merge_files" -lt 2 ] ; then
+ exit 0
+ fi
+ git clone $TEST_DEFINITIONS_REPO $TEST_DEF_FOLDER
+ pushd $TEST_DEF_FOLDER
+ git checkout $TEST_DEFINITIONS_REFSPEC
+ popd
+ bash $TEST_DEF_FOLDER/scripts/tools/code_coverage/fastmodel_baremetal/bmcov/report/branch_coverage/merge.sh \
+ -j $MERGE_JSON -l ${OUTDIR} -p $project
+ generate_bmcov_header ${OUTDIR}/index.html ${REPORT_HTML}
+ cp ${REPORT_HTML} $OUTDIR
+popd
diff --git a/script/get_latest_snapshot.py b/script/get_latest_snapshot.py
index a806730..6aff680 100755
--- a/script/get_latest_snapshot.py
+++ b/script/get_latest_snapshot.py
@@ -1,12 +1,13 @@
#!/usr/bin/env python3
#
-# Copyright (c) 2019, Arm Limited. All rights reserved.
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import argparse
import datetime
+import json
import os
import sys
@@ -27,28 +28,18 @@
parser = argparse.ArgumentParser()
parser.add_argument("--description", help="Snapshot description filter")
-parser.add_argument("--file", dest="output_file", help="Output file. Mandatory")
parser.add_argument("--old", default=10, help="Max snapshot age in days")
parser.add_argument("--host", default=coverity_host, help="Coverity server")
parser.add_argument("--https-port", default=coverity_port, help="Coverity Secure port")
+parser.add_argument("--auth-key-file", default=None, help="Coverity authentication file", dest="auth_key_file")
parser.add_argument("--version", help="Snapshot version filter")
parser.add_argument("stream_name")
opts = parser.parse_args()
-if not opts.output_file:
- raise Exception("Must specify an output file")
-
-# We output the snapshot ID to the specified file. In case of any errors, we
-# remove the file, and Coverity wrapper can test for its existence.
-try:
- user = os.environ["TFCIBOT_USER"]
- password = os.environ["TFCIBOT_PASSWORD"]
-except:
- print(" Unable to get credentials for user tfcibot")
- print(" For potentially faster analysis, suggest set "
- "TFCIBOT_PASSWORD and TFCIBOT_PASSWORD in the environment")
- sys.exit(0)
+token = json.load(open(opts.auth_key_file))
+user = token["username"]
+password = token["key"]
# SOAP magic stuff
client = suds.client.Client("https://{}/ws/v9/configurationservice?wsdl".format(opts.host))
@@ -79,9 +70,6 @@
# Print ID of the last snapshot if results were returned
if results:
- try:
- with open(opts.output_file, "w") as fd:
- print(results[-1].id, file=fd)
- except:
- os.remove(opts.output_file)
- raise
+ print(results[-1].id)
+else:
+ sys.exit(1)
diff --git a/script/juno_manual.py b/script/juno_manual.py
new file mode 100755
index 0000000..4dcbead
--- /dev/null
+++ b/script/juno_manual.py
@@ -0,0 +1,629 @@
+#!/usr/bin/env python3
+
+"""
+Script to automate the manual juno tests that used to require the Juno board
+to be manually power cycled.
+
+"""
+
+import argparse
+import datetime
+import enum
+import logging
+import os
+import pexpect
+import re
+import shutil
+import sys
+import time
+import zipfile
+from pexpect import pxssh
+
+################################################################################
+# Classes #
+################################################################################
+
+class CriticalError(Exception):
+ """
+ Raised when a serious issue occurs that will likely mean abort.
+ """
+ pass
+
+class TestStatus(enum.Enum):
+ """
+ This is an enum to describe possible return values from test handlers.
+ """
+ SUCCESS = 0
+ FAILURE = 1
+ CONTINUE = 2
+
+class JunoBoardManager(object):
+ """
+ Manage Juno board reservation and mounts with support for context
+ management.
+ Parameters
+ ssh (pxssh object): SSH connection to remote machine
+ password (string): sudo password for mounting/unmounting
+ """
+
+ def __init__(self, ssh, password):
+ self.ssh = ssh
+ self.path = ""
+ self.password = password
+ self.mounted = False
+
+ def reserve(self):
+ """
+ Try to reserve a Juno board.
+ """
+ for path in ["/home/login/generaljuno1", "/home/login/generaljuno2"]:
+ logging.info("Trying %s...", path)
+ self.ssh.before = ""
+ self.ssh.sendline("%s/reserve.sh" % path)
+ res = self.ssh.expect(["RESERVE_SCRIPT_SUCCESS", "RESERVE_SCRIPT_FAIL", pexpect.EOF, \
+ pexpect.TIMEOUT], timeout=10)
+ if res == 0:
+ self.path = path
+ return
+ if res == 1:
+ continue
+ else:
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+ raise CriticalError("Could not reserve a Juno board.")
+
+ def release(self):
+ """
+ Release a previously reserved Juno board.
+ """
+ if self.mounted:
+ self.unmount()
+ logging.info("Unmounted Juno storage device.")
+ if self.path == "":
+ raise CriticalError("No Juno board reserved.")
+ self.ssh.before = ""
+ self.ssh.sendline("%s/release.sh" % self.path)
+ res = self.ssh.expect(["RELEASE_SCRIPT_SUCCESS", "RELEASE_SCRIPT_FAIL", pexpect.EOF, \
+ pexpect.TIMEOUT], timeout=10)
+ if res == 0:
+ return
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+
+ def mount(self):
+ """
+ Mount the reserved Juno board storage device.
+ """
+ if self.path == "":
+ raise CriticalError("No Juno board reserved.")
+ if self.mounted:
+ return
+ self.ssh.before = ""
+ self.ssh.sendline("%s/mount.sh" % self.path)
+ res = self.ssh.expect(["password for", "MOUNT_SCRIPT_SUCCESS", pexpect.TIMEOUT, \
+ pexpect.EOF, "MOUNT_SCRIPT_FAIL"], timeout=10)
+ if res == 0:
+ self.ssh.before = ""
+ self.ssh.sendline("%s" % self.password)
+ res = self.ssh.expect(["MOUNT_SCRIPT_SUCCESS", "Sorry, try again.", pexpect.TIMEOUT, \
+ pexpect.EOF, "MOUNT_SCRIPT_FAIL"], timeout=10)
+ if res == 0:
+ self.mounted = True
+ return
+ elif res == 1:
+ raise CriticalError("Incorrect sudo password.")
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+ elif res == 1:
+ self.mounted = True
+ return
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+
+ def unmount(self):
+ """
+ Unmount the reserved Juno board storage device.
+ """
+ if self.path == "":
+ raise CriticalError("No Juno board reserved.")
+ if not self.mounted:
+ return
+ self.ssh.before = ""
+ self.ssh.sendline("%s/unmount.sh" % self.path)
+ # long timeout here since linux likes to queue file IO operations
+ res = self.ssh.expect(["password for", "UNMOUNT_SCRIPT_SUCCESS", pexpect.TIMEOUT, \
+ pexpect.EOF, "UNMOUNT_SCRIPT_FAIL"], timeout=600)
+ if res == 0:
+ self.ssh.before = ""
+ self.ssh.sendline("%s" % self.password)
+ res = self.ssh.expect(["UNMOUNT_SCRIPT_SUCCESS", "Sorry, try again.", pexpect.TIMEOUT, \
+ pexpect.EOF, "UNMOUNT_SCRIPT_FAIL"], timeout=600)
+ if res == 0:
+ self.mounted = False
+ return
+ elif res == 1:
+ raise CriticalError("Incorrect sudo password.")
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+ elif res == 1:
+ self.mounted = False
+ return
+ elif res == 2:
+ raise CriticalError("Timed out waiting for unmount.")
+ logging.error(self.ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+
+ def get_path(self):
+ """
+ Get the path to the reserved Juno board.
+ """
+ if self.path == "":
+ raise CriticalError("No Juno board reserved.")
+ return self.path
+
+ def __enter__(self):
+ # Attempt to reserve if it hasn't been done already
+ if self.path == "":
+ self.reserve()
+
+ def __exit__(self, exc_type, exc_value, exc_traceback):
+ self.release()
+
+################################################################################
+# Helper Functions #
+################################################################################
+
+def recover_juno(ssh, juno_board, uart):
+ """
+ If mount fails, this function attempts to power cycle the juno board, cancel
+ auto-boot, and manually enable debug USB before any potentially bad code
+ can run.
+ Parameters
+ ssh (pxssh object): ssh connection to remote machine
+ juno_board (JunoBoardManager): Juno instance to attempt to recover
+ uart (pexpect): Connection to juno uart
+ """
+ power_off(ssh, juno_board.get_path())
+ time.sleep(10)
+ power_on(ssh, juno_board.get_path())
+    # Wait for auto boot message then send an enter press
+ res = uart.expect(["Press Enter to stop auto boot", pexpect.EOF, pexpect.TIMEOUT], timeout=60)
+ if res != 0:
+ raise CriticalError("Juno auto boot prompt not detected, recovery failed.")
+ uart.sendline("")
+ # Wait for MCC command prompt then send "usb_on"
+ res = uart.expect(["Cmd>", pexpect.EOF, pexpect.TIMEOUT], timeout=10)
+ if res != 0:
+ raise CriticalError("Juno MCC prompt not detected, recovery failed.")
+ uart.sendline("usb_on")
+ # Wait for debug usb confirmation
+ res = uart.expect(["Enabling debug USB...", pexpect.EOF, pexpect.TIMEOUT], timeout=10)
+ if res != 0:
+ raise CriticalError("Debug usb not enabled, recovery failed.")
+ # Dead wait for linux to detect the USB device then try to mount again
+ time.sleep(10)
+ juno_board.mount()
+
+def copy_file_to_remote(source, dest, remote, user, password):
+ """
+ Uses SCP to copy a file to a remote machine.
+ Parameters
+ source (string): Source path
+ dest (string): Destination path
+ remote (string): Name or IP address of remote machine
+ user (string): Username to login with
+ password (string): Password to login/sudo with
+ """
+ scp = "scp -r %s %s@%s:%s" % (source, user, remote, dest)
+ copy = pexpect.spawn(scp)
+ copy.expect("password:")
+ copy.sendline(password)
+ res = copy.expect([pexpect.EOF, pexpect.TIMEOUT], timeout=600)
+ if res == 0:
+ copy.close()
+ if copy.exitstatus == 0:
+ return
+ raise CriticalError("Unexpected error occurred during SCP: %d" % copy.exitstatus)
+ elif res == 1:
+ raise CriticalError("SCP operation timed out.")
+ raise CriticalError("Unexpected pexpect result: %d" % res)
+
+def extract_zip_file(source, dest):
+ """
+ Extracts a zip file on the local machine.
+ Parameters
+ source (string): Path to input zip file
+ dest (string): Path to output directory
+ """
+ try:
+ with zipfile.ZipFile(source, 'r') as src:
+ src.extractall(dest)
+ return
+ except Exception:
+ raise CriticalError("Could not extract boardfiles.")
+
+def remote_copy(ssh, source, dest):
+ """
+ Copy files from remote workspace to Juno directory using rsync
+ Parameters
+ ssh (pxssh object): Connection to remote system
+ source (string): Source file path
+ dest (string): Destination file path
+ """
+ ssh.before = ""
+ ssh.sendline("rsync -rt %s %s" % (source, dest))
+ res = ssh.expect(["$", pexpect.EOF, pexpect.TIMEOUT], timeout=60)
+ if res != 0:
+ logging.error(ssh.before.decode("utf-8"))
+ raise CriticalError("Unexpected error occurred during rsync operation.")
+ ssh.before = ""
+ ssh.sendline("echo $?")
+ res = ssh.expect(["0", pexpect.EOF, pexpect.TIMEOUT], timeout=10)
+ if res != 0:
+ logging.error(ssh.before.decode("utf-8"))
+ raise CriticalError("rsync failed")
+ return
+
+def connect_juno_uart(host, port):
+ """
+ Spawn a pexpect object for the Juno UART
+ Parameters
+        host (string): Telnet host name or IP address
+ port (int): Telnet port number
+ Returns
+ pexpect object if successful
+ """
+ uart = pexpect.spawn("telnet %s %d" % (host, port))
+ result = uart.expect(["Escape character is", pexpect.EOF, pexpect.TIMEOUT], timeout=10)
+ if result == 0:
+ return uart
+ raise CriticalError("Could not connect to Juno UART.")
+
+def get_uart_port(ssh, juno):
+    """
+    Get the telnet port for the Juno UART
+    Parameters
+        ssh (pxssh object): SSH session to remote machine
+        juno (string): Path to Juno directory on remote
+    Returns
+        int: Telnet port number
+    Raises
+        CriticalError: if the port cannot be parsed from the telnetport file.
+    """
+    ssh.before = ""
+    ssh.sendline("cat %s/telnetport" % juno)
+    # Deliberately expect only TIMEOUT (always index 0 after 1s), then
+    # scrape whatever the remote printed from the buffered output.
+    res = ssh.expect([pexpect.TIMEOUT], timeout=1)
+    if res == 0:
+        match = re.search(r"port: (\d+)", ssh.before.decode("utf-8"))
+        if match:
+            return int(match.group(1))
+    raise CriticalError("Could not get telnet port.")
+
+def power_off(ssh, juno):
+    """
+    Power off the Juno board
+    Parameters
+        ssh (pxssh object): SSH session to remote machine
+        juno (string): Path to Juno directory on remote
+    Raises
+        CriticalError: on script failure, EOF or timeout.
+    """
+    ssh.before = ""
+    # poweroff.sh is expected to print POWEROFF_SCRIPT_SUCCESS or
+    # POWEROFF_SCRIPT_FAIL (see the expect patterns below).
+    ssh.sendline("%s/poweroff.sh" % juno)
+    res = ssh.expect(["POWEROFF_SCRIPT_SUCCESS", pexpect.EOF, pexpect.TIMEOUT, \
+            "POWEROFF_SCRIPT_FAIL"], timeout=10)
+    # Any index other than 0 (EOF, timeout, or explicit FAIL at 3) is an error.
+    if res == 0:
+        return
+    logging.error(ssh.before.decode("utf-8"))
+    raise CriticalError("Could not power off the Juno board.")
+
+def power_on(ssh, juno):
+    """
+    Power on the Juno board
+    Parameters
+        ssh (pxssh object): SSH session to remote machine
+        juno (string): Path to Juno directory on remote
+    Raises
+        CriticalError: on script failure, EOF or timeout.
+    """
+    ssh.before = ""
+    # poweron.sh is expected to print POWERON_SCRIPT_SUCCESS or
+    # POWERON_SCRIPT_FAIL (see the expect patterns below).
+    ssh.sendline("%s/poweron.sh" % juno)
+    res = ssh.expect(["POWERON_SCRIPT_SUCCESS", pexpect.EOF, pexpect.TIMEOUT, \
+            "POWERON_SCRIPT_FAIL"], timeout=10)
+    # Any index other than 0 (EOF, timeout, or explicit FAIL at 3) is an error.
+    if res == 0:
+        return
+    logging.error(ssh.before.decode("utf-8"))
+    raise CriticalError("Could not power on the Juno board.")
+
+def erase_juno(ssh, juno):
+    """
+    Erase the mounted Juno storage device
+    Parameters
+        ssh (pxssh object): SSH session to remote machine
+        juno (string): Path to Juno directory on remote
+    Raises
+        CriticalError: on script failure, EOF or timeout.
+    """
+    ssh.before = ""
+    # erasejuno.sh is expected to print ERASEJUNO_SCRIPT_SUCCESS or
+    # ERASEJUNO_SCRIPT_FAIL; only SUCCESS (index 0) counts as success.
+    ssh.sendline("%s/erasejuno.sh" % juno)
+    res = ssh.expect(["ERASEJUNO_SCRIPT_SUCCESS", "ERASEJUNO_SCRIPT_FAIL", pexpect.EOF, \
+            pexpect.TIMEOUT], timeout=30)
+    if res == 0:
+        return
+    logging.error(ssh.before.decode("utf-8"))
+    raise CriticalError("Could not erase the Juno storage device.")
+
+def erase_juno_workspace(ssh, juno):
+    """
+    Erase the Juno workspace
+    Parameters
+        ssh (pxssh object): SSH session to remote machine
+        juno (string): Path to Juno directory on remote
+    Raises
+        CriticalError: on script failure, EOF or timeout.
+    """
+    ssh.before = ""
+    # eraseworkspace.sh is expected to print ERASEWORKSPACE_SCRIPT_SUCCESS or
+    # ERASEWORKSPACE_SCRIPT_FAIL; only SUCCESS (index 0) counts as success.
+    ssh.sendline("%s/eraseworkspace.sh" % juno)
+    res = ssh.expect(["ERASEWORKSPACE_SCRIPT_SUCCESS", "ERASEWORKSPACE_SCRIPT_FAIL", pexpect.EOF, \
+            pexpect.TIMEOUT], timeout=30)
+    if res == 0:
+        return
+    logging.error(ssh.before.decode("utf-8"))
+    raise CriticalError("Could not erase the remote workspace.")
+
+def process_uart_output(uart, timeout, handler, telnethost, telnetport):
+    """
+    This function receives UART data from the Juno board, creates a full line
+    of text, then passes it to a test handler function.
+    Parameters
+        uart (pexpect): Pexpect process containing UART telnet session
+        timeout (int): How long to wait for test completion.
+        handler (function): Function to pass each line of test output to.
+        telnethost (string): Telnet host to use if uart connection fails.
+        telnetport (int): Telnet port to use if uart connection fails.
+    Raises
+        CriticalError: on overall timeout, handler-reported failure, or an
+        unrecoverable connection/OS error.
+    """
+    # Start timeout counter
+    timeout_start = datetime.datetime.now()
+
+    line = ""
+    while True:
+        try:
+            # Check if timeout has expired
+            elapsed = datetime.datetime.now() - timeout_start
+            if elapsed.total_seconds() > timeout:
+                raise CriticalError("Test timed out, see log file.")
+
+            # Read next character from Juno, one at a time, so a complete
+            # line can be assembled before it is handed to the handler.
+            char = uart.read_nonblocking(size=1, timeout=1).decode("utf-8")
+            if '\n' in char:
+                logging.info("JUNO: %s", line)
+
+                result = handler(uart, line)
+                if result == TestStatus.SUCCESS:
+                    return
+                elif result == TestStatus.FAILURE:
+                    raise CriticalError("Test manager returned TestStatus.FAILURE")
+
+                line = ""
+            else:
+                line = line + char
+
+        # uart.read_nonblocking will throw timeouts a lot by design so catch and ignore
+        except pexpect.TIMEOUT:
+            continue
+        except pexpect.EOF:
+            # Telnet dropped; try once per occurrence to re-establish the UART.
+            logging.warning("Connection lost unexpectedly, attempting to restart.")
+            try:
+                uart = connect_juno_uart(telnethost, telnetport)
+            except CriticalError:
+                raise CriticalError("Could not reopen Juno UART")
+            continue
+        except OSError:
+            raise CriticalError("Unexpected OSError occurred.")
+        except UnicodeDecodeError:
+            # Drop undecodable bytes — presumably UART line noise; TODO confirm.
+            continue
+        except Exception as e:
+            # This case exists to catch any weird or rare exceptions.
+            raise CriticalError("Unexpected exception occurred: %s" % str(e))
+
+################################################################################
+# Test Handlers #
+################################################################################
+
+TEST_CASE_TFTF_MANUAL_PASS_COUNT = 0   # parsed from "Tests Passed : N"
+TEST_CASE_TFTF_MANUAL_CRASH_COUNT = 0  # parsed from "Tests Crashed : N"
+TEST_CASE_TFTF_MANUAL_FAIL_COUNT = 0   # parsed from "Tests Failed : N"
+def test_case_tftf_manual(uart, line):
+    """
+    This function handles TFTF tests and parses the output into a pass or fail
+    result. Any crashes or fails result in an overall test failure but skips
+    and passes are fine.
+    """
+    global TEST_CASE_TFTF_MANUAL_PASS_COUNT
+    global TEST_CASE_TFTF_MANUAL_CRASH_COUNT
+    global TEST_CASE_TFTF_MANUAL_FAIL_COUNT
+
+    # This test needs to be powered back on a few times
+    if "Board powered down, use REBOOT to restart." in line:
+        # time delay to let things finish up
+        time.sleep(3)
+        uart.sendline("reboot")
+        return TestStatus.CONTINUE
+
+    elif "Tests Passed" in line:
+        match = re.search(r"Tests Passed : (\d+)", line)
+        if match:
+            TEST_CASE_TFTF_MANUAL_PASS_COUNT = int(match.group(1))
+            return TestStatus.CONTINUE
+        # Plain string literal (a stray r-prefix removed for consistency
+        # with the Failed/Crashed branches below; bytes are identical).
+        logging.error("Error parsing line: %s", line)
+        return TestStatus.FAILURE
+
+    elif "Tests Failed" in line:
+        match = re.search(r"Tests Failed : (\d+)", line)
+        if match:
+            TEST_CASE_TFTF_MANUAL_FAIL_COUNT = int(match.group(1))
+            return TestStatus.CONTINUE
+        logging.error("Error parsing line: %s", line)
+        return TestStatus.FAILURE
+
+    elif "Tests Crashed" in line:
+        match = re.search(r"Tests Crashed : (\d+)", line)
+        if match:
+            TEST_CASE_TFTF_MANUAL_CRASH_COUNT = int(match.group(1))
+            return TestStatus.CONTINUE
+        logging.error("Error parsing line: %s", line)
+        return TestStatus.FAILURE
+
+    elif "Total tests" in line:
+        # "Total tests" is the summary footer: judge the whole run now.
+        if TEST_CASE_TFTF_MANUAL_PASS_COUNT == 0:
+            return TestStatus.FAILURE
+        if TEST_CASE_TFTF_MANUAL_CRASH_COUNT > 0:
+            return TestStatus.FAILURE
+        if TEST_CASE_TFTF_MANUAL_FAIL_COUNT > 0:
+            return TestStatus.FAILURE
+        return TestStatus.SUCCESS
+
+    return TestStatus.CONTINUE
+
+TEST_CASE_LINUX_MANUAL_SHUTDOWN_HALT_SENT = False  # ensures "halt -f" is sent only once
+def test_case_linux_manual_shutdown(uart, line):
+    """
+    This handler performs a linux manual shutdown test by waiting for the linux
+    prompt and sending the appropriate halt command, then waiting for the
+    expected output.
+    """
+    global TEST_CASE_LINUX_MANUAL_SHUTDOWN_HALT_SENT
+
+    # Look for Linux prompt
+    if "/ #" in line and TEST_CASE_LINUX_MANUAL_SHUTDOWN_HALT_SENT is False:
+        # Brief settle delay before sending input, as in the TFTF handler.
+        time.sleep(3)
+        uart.sendline("halt -f")
+        TEST_CASE_LINUX_MANUAL_SHUTDOWN_HALT_SENT = True
+        return TestStatus.CONTINUE
+
+    # Once halt command has been issued, wait for confirmation
+    elif "reboot: System halted" in line:
+        return TestStatus.SUCCESS
+
+    # For any other result, continue.
+    return TestStatus.CONTINUE
+
+################################################################################
+# Script Main #
+################################################################################
+
+def main():
+    """
+    Main function, handles the initial set up and test dispatch to Juno board.
+    """
+    parser = argparse.ArgumentParser(description="Launch a Juno manual test.")
+    parser.add_argument("host", type=str, help="Name or IP address of Juno host system.")
+    parser.add_argument("username", type=str, help="Username to login to host system.")
+    # NOTE(review): a password on argv is visible in `ps`; consider env/stdin.
+    parser.add_argument("password", type=str, help="Password to login to host system.")
+    parser.add_argument("boardfiles", type=str, help="ZIP file containing Juno boardfiles.")
+    parser.add_argument("workspace", type=str, help="Directory for scratch files.")
+    parser.add_argument("testname", type=str, help="Name of test to run.")
+    parser.add_argument("timeout", type=int, help="Time to wait for test completion.")
+    parser.add_argument("logfile", type=str, help="Path to log file to create.")
+    parser.add_argument("-l", "--list", action='store_true', help="List supported test cases.")
+    args = parser.parse_args()
+
+    # Print list if requested
+    if args.list:
+        print("Supported Tests")
+        print(" tftf-manual-generic - Should work for all TFTF tests.")
+        print(" linux-manual-shutdown - Waits for Linux prompt and sends halt command.")
+        exit(0)
+
+    # Start logging (file plus mirrored to stdout)
+    print("Creating log file: %s" % args.logfile)
+    logging.basicConfig(filename=args.logfile, level=logging.DEBUG, \
+        format="[%(asctime)s] %(message)s", datefmt="%I:%M:%S")
+    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
+
+    # Make sure test name is supported so we don't waste time if it isn't.
+    if args.testname == "tftf-manual-generic":
+        test_handler = test_case_tftf_manual
+    elif args.testname == "linux-manual-shutdown":
+        test_handler = test_case_linux_manual_shutdown
+    else:
+        logging.error("Test name \"%s\" invalid or not supported.", args.testname)
+        exit(1)
+    logging.info("Selected test \"%s\"", args.testname)
+
+    # Helper functions either succeed or raise CriticalError so no error checking is done here
+    try:
+        # Start SSH session to host machine
+        logging.info("Starting SSH session to remote machine.")
+        ssh = pxssh.pxssh()
+        if not ssh.login(args.host, args.username, args.password):
+            raise CriticalError("Could not start SSH session.")
+        # Disable character echo so scraped output is not polluted by input
+        ssh.sendline("stty -echo")
+        with ssh:
+
+            # Try to reserve a juno board
+            juno_board = JunoBoardManager(ssh, args.password)
+            juno_board.reserve()
+            juno = juno_board.get_path()
+            logging.info("Reserved %s", juno)
+            with juno_board:
+
+                # Get UART port and start telnet session
+                logging.info("Opening Juno UART")
+                port = get_uart_port(ssh, juno)
+                logging.info("Using telnet port %d", port)
+                uart = connect_juno_uart(args.host, port)
+                with uart:
+
+                    # Extract boardfiles locally
+                    logging.info("Extracting boardfiles.")
+                    local_boardfiles = os.path.join(args.workspace, "boardfiles")
+                    if os.path.exists(local_boardfiles):
+                        shutil.rmtree(local_boardfiles)
+                    os.mkdir(local_boardfiles)
+                    extract_zip_file(args.boardfiles, local_boardfiles)
+
+                    # Clear out the workspace directory on the remote system
+                    logging.info("Erasing remote workspace.")
+                    erase_juno_workspace(ssh, juno)
+
+                    # SCP boardfiles to juno host
+                    logging.info("Copying boardfiles to remote system.")
+                    copy_file_to_remote(os.path.join(local_boardfiles), \
+                        os.path.join(juno, "workspace"), args.host, args.username, args.password)
+
+                    # Try to mount the storage device
+                    logging.info("Mounting the Juno storage device.")
+                    try:
+                        juno_board.mount()
+                    except CriticalError:
+                        logging.info("Mount failed, attempting to recover Juno board.")
+                        recover_juno(ssh, juno_board, uart)
+                        logging.info("Juno board recovered.")
+
+                    # Move boardfiles from temp directory to juno storage
+                    logging.info("Copying new boardfiles to storage device.")
+                    remote_copy(ssh, os.path.join(juno, "workspace", "boardfiles", "*"), \
+                        os.path.join(juno, "juno"))
+
+                    # Unmounting the juno board.
+                    logging.info("Unmounting Juno storage device and finishing pending I/O.")
+                    juno_board.unmount()
+
+                    # Power cycle the juno board to reboot it.
+                    logging.info("Rebooting the Juno board.")
+                    power_off(ssh, juno)
+                    # dead wait to let the power supply do its thing
+                    time.sleep(10)
+                    power_on(ssh, juno)
+
+                    # dead wait to let the board start booting before watching the UART
+                    time.sleep(10)
+
+                    # Process UART output and wait for test completion
+                    process_uart_output(uart, args.timeout, test_handler, args.host, port)
+                    logging.info("Tests Passed!")
+
+    except CriticalError as exception:
+        logging.error(str(exception))
+        exit(1)
+
+    # Exit with 0 on successful finish
+    exit(0)
+
+################################################################################
+# Script Entry Point #
+################################################################################
+
+if __name__ == "__main__":
+ main()
diff --git a/script/make_stress_test_image.sh b/script/make_stress_test_image.sh
index 30c6dfb..5defc9f 100755
--- a/script/make_stress_test_image.sh
+++ b/script/make_stress_test_image.sh
@@ -91,7 +91,7 @@
echo
rm -rf "test_assets"
echo "Cloning test assets..."
-git clone -q --depth 1 http://ssg-sw.cambridge.arm.com/gerrit/tests/test_assets.git
+git clone -q --depth 1 https://gerrit.oss.arm.com/tests/test_assets
echo "Cloned test assets."
cd test_assets
diff --git a/script/run_package.sh b/script/run_package.sh
index a9d91d2..8c1b57a 100755
--- a/script/run_package.sh
+++ b/script/run_package.sh
@@ -56,7 +56,7 @@
# data (Host CPU time spent running in User and System). Safely
# kill the model by using SIGINT(^C) that helps in printing
# statistical data.
- if [ "$pid" == "$model_pid" ]; then
+ if [ "$pid" == "$model_pid" ] && [ "${COVERAGE_ON}" != "1" ]; then
model_cid=$(pgrep -P "$model_pid" | xargs)
# ignore errors
kill -SIGINT "$model_cid" &>/dev/null || true
@@ -125,6 +125,10 @@
source "run/env"
fi
+# Source model environment for run
+if [ -f "run/model_env" ]; then
+ source "run/model_env"
+fi
# Fail if there was no model path set
if [ -z "$model_path" ]; then
die "No model path set by package!"
diff --git a/script/scan_build_wrapper.sh b/script/scan_build_wrapper.sh
new file mode 100644
index 0000000..5b52d5f
--- /dev/null
+++ b/script/scan_build_wrapper.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+scan_build_wrapper(){
+    # Run the supplied make command under clang's scan-build static analyzer
+    # and collect the generated HTML reports under $workspace.
+    local make_command="$(echo $@)"
+    local cross_compile="$(grep -oP "(?<=CROSS_COMPILE=)[a-z\-0-9]+" <<< $make_command)"
+    local build_config="$(echo $make_command | awk 'BEGIN {FS = "make "}{print $2}')"
+    local scan_build_flags="-v -analyze-headers -analyzer-config stable-report-filename=true "
+
+    # scan_build generates html and .js files to render bugs on code base
+    reports_dir="$workspace/scan-build-reports/"
+
+    # Get the appropriate cross compiler path
+    scan_build_flags+=" -o $reports_dir --use-cc=$(which ${cross_compile}gcc) \
+        --analyzer-target=${cross_compile}"
+
+    # Work around a limitation in Jenkins arch-dev nodes
+    if [ "$JENKINS_HOME" ]; then
+        export PATH=/usr/lib/llvm-6.0/bin/:$PATH
+        echo_w "Jenkins runs"
+        scan_build_artefacts="$BUILD_URL/artifact/artefacts/debug/scan-build-reports"
+    else
+        echo_w "Local runs"
+        scan_build_artefacts="$artefacts/debug/scan-build-reports"
+    fi
+
+    # NOTE(review): $tf_config is expected from the caller's environment; the
+    # locally derived $build_config is only used in the defect message below —
+    # confirm that mixing the two is intended.
+    echo_w "Build config selected: $tf_config"
+    make realclean
+
+    local build_info=$(scan-build ${scan_build_flags} $make_command)
+    # Extract the report directory from scan-build's "scan-view <dir> to
+    # examine bug reports" hint, stripping colons and quotes.
+    result_loc=$(echo $build_info | awk 'BEGIN {FS = "scan-view "}{print $2}' \
+        | awk 'BEGIN {FS = " to examine bug reports"}{print $1}' \
+        | awk '{ gsub("[:\47]" , ""); print $0}')
+
+    if [ -d $result_loc ]; then
+        local defects="$(find $result_loc -iname 'report*.html'| wc -l)"
+        if [ $defects -ge 1 ]; then
+            echo_w "$defects defect(s) found in build \"$build_config\" "
+            echo_w "Please view the detailed report here:"
+            echo_w "$scan_build_artefacts/$tf_config-reports/index.html"
+        fi
+        mv "$result_loc" "$reports_dir/$tf_config-reports"
+    fi
+}
diff --git a/script/send_artefacts.sh b/script/send_artefacts.sh
new file mode 100755
index 0000000..b9c90e4
--- /dev/null
+++ b/script/send_artefacts.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# The single command-line argument names a folder (relative to $workspace)
+# to archive and upload.
+if [ $# -eq 0 ]; then
+    echo "No folder name supplied."
+    exit 1
+fi
+
+folder_name="$1"
+archive_name="$1.tar.xz"
+
+# $workspace, $artefacts_receiver, $JOB_NAME and $BUILD_NUMBER are assumed to
+# be provided by the CI environment — TODO confirm.
+pushd "$workspace"
+
+# Archive the folder into a .tar.xz
+tar -cJf "$archive_name" "$folder_name"
+
+where="$artefacts_receiver/${TEST_GROUP:?}/${TEST_CONFIG:?}/$archive_name"
+where+="?j=$JOB_NAME&b=$BUILD_NUMBER"
+
+# Send via HTTP PUT; an upload failure is reported but deliberately not fatal
+if wget -q --method=PUT --body-file="$archive_name" "$where"; then
+    echo "$folder_name submitted to $where."
+else
+    echo "Error submitting $folder_name to $where."
+fi
+
+popd
diff --git a/script/test_definitions.sh b/script/test_definitions.sh
new file mode 100644
index 0000000..ec03c89
--- /dev/null
+++ b/script/test_definitions.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Each default below may be overridden via the environment before sourcing.
+export GERRIT_URL=${GERRIT_URL:-https://gerrit.oss.arm.com}
+export TEST_DEFINITIONS_REPO=${TEST_DEFINITIONS_REPO:-${GERRIT_URL}/tests/lava/test-definitions.git}
+export TEST_DEFINITIONS_REFSPEC=${TEST_DEFINITIONS_REFSPEC:-tools-coverage-workflow_2020-05-27}  # pinned refspec