Initial commit for TF-A CI scripts
Signed-off-by: Fathi Boudra <fathi.boudra@linaro.org>
diff --git a/job/tf-worker/generate_yaml.sh b/job/tf-worker/generate_yaml.sh
new file mode 100755
index 0000000..37a0ae0
--- /dev/null
+++ b/job/tf-worker/generate_yaml.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Choose the LAVA payload type from the run configuration: any config whose
+# name mentions "tftf" (case-insensitive) boots TFTF; everything else boots
+# Linux.
+if [[ "${RUN_CONFIG,,}" == *tftf* ]]; then
+    payload_type="tftf"
+else
+    payload_type="linux"
+fi
+
+"$CI_ROOT/script/parse_lava_job.py" --payload-type "$payload_type"
diff --git a/job/tf-worker/is_juno_config.sh b/job/tf-worker/is_juno_config.sh
new file mode 100755
index 0000000..31affc9
--- /dev/null
+++ b/job/tf-worker/is_juno_config.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Not a Juno run if LAVA runs or Juno runs are being skipped.
+if [ -n "$skip_juno" ] || [ -n "$skip_runs" ]; then
+    exit 1
+fi
+
+# For Juno runs, we need to let the board download build artefacts using a
+# URL. The only way to have a board-accessible URL at the moment is to have
+# build artefacts archived. Therefore, only for Juno do we spawn the build as
+# a separate job; otherwise, we build within this job.
+#
+# Exit 0 (is a Juno config) only when the run config name starts with
+# "juno", case-insensitively.
+if [[ "${RUN_CONFIG,,}" == juno* ]]; then
+    exit 0
+fi
+
+exit 1
diff --git a/job/tf-worker/manage_artefacts.sh b/job/tf-worker/manage_artefacts.sh
new file mode 100755
index 0000000..865afd9
--- /dev/null
+++ b/job/tf-worker/manage_artefacts.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Prune the artefacts directory, keeping only log files. Nothing to do when
+# no artefacts were produced.
+if [ -d artefacts ]; then
+    find artefacts -type f ! -name '*.log' -exec rm -f {} +
+fi
diff --git a/job/tf-worker/parse_test.sh b/job/tf-worker/parse_test.sh
new file mode 100755
index 0000000..fead3a7
--- /dev/null
+++ b/job/tf-worker/parse_test.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Parse test config. This produces the $workspace/env file. Quote the path,
+# consistent with the other tf-worker scripts, so a $CI_ROOT containing
+# spaces does not word-split.
+"$CI_ROOT/script/parse_test.sh"
diff --git a/job/tf-worker/run_fvp_test.sh b/job/tf-worker/run_fvp_test.sh
new file mode 100755
index 0000000..2a62eab
--- /dev/null
+++ b/job/tf-worker/run_fvp_test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Build the software stack for this run configuration.
+"$CI_ROOT/script/build_package.sh"
+
+# When runs are skipped, the build above is all that was needed.
+[ -z "$skip_runs" ] || exit 0
+
+# FVP configurations (run config name starting with "fvp") execute locally
+# on the host's model rather than on a LAVA board.
+if [ "$RUN_CONFIG" != "nil" ] && [[ "${RUN_CONFIG,,}" == fvp* ]]; then
+    "$CI_ROOT/script/run_package.sh"
+fi
diff --git a/job/tf-worker/should_build_local.sh b/job/tf-worker/should_build_local.sh
new file mode 100755
index 0000000..5b47866
--- /dev/null
+++ b/job/tf-worker/should_build_local.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -e
+
+# Build locally (exit 0) for build-only configs ("nil"), FVP configs and
+# Coverity configs.
+case "$RUN_CONFIG" in
+    nil|fvp-*|coverity-*)
+        exit 0
+        ;;
+esac
+
+# If we're not going to run Juno, then no need to spawn tf-build-for-lava;
+# build it locally.
+if [ -n "$skip_juno" ]; then
+    exit 0
+fi
+
+exit 1
diff --git a/job/tf-worker/submit_lava_job.sh b/job/tf-worker/submit_lava_job.sh
new file mode 100755
index 0000000..7b47e97
--- /dev/null
+++ b/job/tf-worker/submit_lava_job.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Copyright (c) 2019, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Submit a job to LAVA and wait until the job is complete. This script
+# replaces the "managed script" previously used and provides the same
+# behavior.
+#
+# Required arguments:
+# 1: yaml job file
+# 2: flag whether to save output, true/false, defaults to false
+#
+# Output:
+# job_results.yaml
+# job_output.log if save output = true
+
+set -e
+
+JOB_FILE="$1"
+SAVE_OUTPUT="$2"
+
+# LAVA connection parameters; expected to be filled in for the CI instance.
+LAVA_HOST=
+LAVA_USER=
+LAVA_TOKEN=
+LAVA_URL=
+
+if [ ! -f "${JOB_FILE}" ]; then
+	echo "error: LAVA job file does not exist: ${JOB_FILE}"
+	exit 1
+fi
+
+# Install lavacli in a disposable virtualenv so the host Python is untouched.
+virtualenv -p "$(which python3)" venv
+source venv/bin/activate
+pip install -q lavacli
+
+# Configure lavacli. Quote the credentials: tokens may contain characters
+# the shell would otherwise split or glob on.
+lavacli identities add \
+--username "${LAVA_USER}" \
+--token "${LAVA_TOKEN}" \
+--uri "${LAVA_URL}/RPC2" \
+default
+
+# Submit a job using lavacli
+JOB_ID=$(lavacli jobs submit "${JOB_FILE}")
+if [ -z "$JOB_ID" ] ; then
+	echo "Couldn't submit. Stopping."
+	exit 1
+fi
+
+# Report the job URL on the configured LAVA instance rather than a
+# hard-coded host name.
+echo "Job url: ${LAVA_URL}/scheduler/job/${JOB_ID}"
+
+# Wait for the job to finish
+lavacli jobs wait "${JOB_ID}"
+
+if [ "${SAVE_OUTPUT}" = "true" ] ; then
+	lavacli jobs logs "${JOB_ID}" > job_output.log
+fi
+
+# Get results
+lavacli results "${JOB_ID}" --yaml > job_results.yaml
+
+# Exit virtualenv
+deactivate