visualisation: Add tf-main visualisation script

Add `tf-main-results.{bash,plot}` to fetch test results from a Jenkins
build of tf-a-main and visualise them. Update README.rst with usage info
for the new script and a description of the plot.

Change-Id: I837a2dcb4cd3247b24ba9eec6557699e9a75e9c2
Signed-off-by: Nathan Dunne <Nathan.Dunne@arm.com>
Signed-off-by: Harrison Mutai <harrison.mutai@arm.com>
diff --git a/script/gen_results_report.py b/script/gen_results_report.py
new file mode 100755
index 0000000..64918b2
--- /dev/null
+++ b/script/gen_results_report.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python3
+#
+# Copyright (c) 2022, Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+import argparse
+import csv
+import os
+
+from gen_test_report import emit_header, print_error_message
+
+TABLE_HEADER = """\
+<table id="tf-results-panel">
+ <tbody>
+ <tr>
+ <td class="results-col">
+ <table>
+ <thead>
+ <tr>
+ <th>Passed</th>
+ <th>Skipped</th>
+ <th>Crashed</th>
+ <th>Failed</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>"""
+
+TABLE_FOOTER = """\
+ </tr>
+ </tbody>
+ </table>
+ </td>
+ <td class="button-col">
+ <button id="tf-download-button" onclick="window.open('{}','_blank')">Download Plot</button>
+ </td>
+ </tr>
+ </tbody>
+</table>"""
+
+# Open and sum the results of a comma-separated test result file.
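+# The CSV is assumed to carry one row per test suite with at least the
+# Passed/Skipped/Crashed/Failed columns. Illustrative layout (mirroring the
+# header written by tf-main-results.bash; values are hypothetical):
+#
+#   TestGroup,TestSuite,URL,Result,Passed,Failed,Crashed,Skipped
+#   fvp,"fvp-default,nil",<log-url>,SUCCESS,45,0,0,125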
+def fetch_results(csv_path):
+ failed = passed = skipped = crashed = 0
+ with open(csv_path, "r") as fd:
+ for test in csv.DictReader(fd):
+ failed = failed + int(test["Failed"])
+ passed = passed + int(test["Passed"])
+ skipped = skipped + int(test["Skipped"])
+ crashed = crashed + int(test["Crashed"])
+ return passed, skipped, crashed, failed
+
+
+def main(fd, csv_path, png_path):
+ results_row = ("""<td>{}</td>\n""".rjust(28) * 4).format(
+ *fetch_results(csv_path)
+ )
+
+    # Emit the style sheet and script elements.
+ emit_header(fd)
+
+ print(TABLE_HEADER, file=fd)
+ print(results_row, file=fd)
+
+    # Format the table button to link to the full-size plot of the results.
+    print(TABLE_FOOTER.format(build_url + "/" + png_path), file=fd)
+
+
+if __name__ == "__main__":
+ global build_url
+
+ parser = argparse.ArgumentParser()
+
+ # Add arguments
+ parser.add_argument(
+ "--output",
+ "-o",
+ default="report.html",
+ help="Path to output file",
+ )
+ parser.add_argument(
+ "--csv",
+ default=None,
+ help="Path to input CSV with data for the target job",
+ )
+ parser.add_argument(
+ "--png", default=None, help="Filename for PNG results plot"
+ )
+
+    # The build URL is needed for the plot download link, and the target job
+    # name is used to infer filenames when the user has not provided them.
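+    # Example invocation (illustrative; BUILD_URL and TARGET_BUILD are set by
+    # the Jenkins environment):
+    #   gen_results_report.py --csv results.csv --png results.png -o report.html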
+ opts = parser.parse_args()
+ build_url = os.environ["BUILD_URL"]
+ target_job = os.environ["TARGET_BUILD"]
+ target_job_name = target_job[: target_job.find("/")]
+
+    # Use the filenames provided by the user, or infer them from the target
+    # job name taken from the environment.
+ output_path = "report.html" if not opts.output else opts.output
+ csv_path = target_job_name + "results.csv" if not opts.csv else opts.csv
+ png_path = target_job_name + "results.png" if not opts.png else opts.png
+
+ with open(output_path, "w") as fd:
+ try:
+            main(fd, csv_path, png_path)
+ except:
+ print_error_message(fd)
+ raise
diff --git a/script/gen_test_report.css b/script/gen_test_report.css
index 9a11600..07c298d 100644
--- a/script/gen_test_report.css
+++ b/script/gen_test_report.css
@@ -130,3 +130,51 @@
display: inline-block;
padding: 2px 3px 0px 3px;
}
+
+#tf-results-panel {
+ margin-bottom: 20px;
+ margin-top: 20px;
+ padding: 5px 10px;
+}
+
+.results-col {
+ padding: 5px 10px;
+}
+
+.results-col table {
+ border-collapse: collapse;
+ vertical-align: middle;
+}
+
+.results-col td,
+.results-col th {
+ text-align: center;
+ padding: 5px 10px;
+ font: 20px monospace;
+ border: 1px #c8c8c8 solid;
+}
+
+.results-col th {
+ font-weight: bold;
+ background-color: #e0f7ff;
+}
+.button-col {
+ margin-left: 15px;
+}
+
+#tf-results-panel .button-col {
+ border-collapse: collapse;
+ font: 10px monospace;
+ padding: 25px;
+}
+
+#tf-download-button {
+ background-color: #4b758b;
+ border: 1px solid #aaa;
+ color: #eee;
+ display: block;
+ font-weight: bold;
+ padding: 5px 10px;
+ margin-bottom: 10px;
+ width: 250px;
+}
diff --git a/script/gen_test_report.py b/script/gen_test_report.py
index 965aaa0..d32b118 100755
--- a/script/gen_test_report.py
+++ b/script/gen_test_report.py
@@ -327,6 +327,34 @@
# Return result as string
return "".join(crumbs)
+# Emit style sheet and script elements.
+def emit_header(fd):
+ stem = os.path.splitext(os.path.abspath(__file__))[0]
+ for tag, ext in [("style", "css"), ("script", "js")]:
+ print(open_element(tag), file=fd)
+ with open(os.extsep.join([stem, ext])) as ext_fd:
+ shutil.copyfileobj(ext_fd, fd)
+ print(close_element(tag), file=fd)
+
+def print_error_message(fd):
+    # Upon error, overwrite the output with a static HTML page reporting the
+    # error; the caller is expected to re-raise the original exception.
+ fd.seek(0, io.SEEK_SET)
+
+ # Provide inline style as there won't be a page header for us.
+ err_style = (
+ "border: 1px solid red;",
+ "color: red;",
+ "font-size: 30px;",
+ "padding: 15px;"
+ )
+
+ print(make_element("div",
+ "HTML report couldn't be prepared! Check job console.",
+ style=" ".join(err_style)), file=fd)
+
+    # Truncate the file as we're discarding whatever was generated before.
+ fd.truncate()
def main(fd):
global Build_job, Jenkins, Job
@@ -347,6 +375,9 @@
opts = parser.parse_args()
workspace = os.environ["WORKSPACE"]
+
+ emit_header(fd)
+
if not opts.from_json:
json_obj = {}
@@ -431,13 +462,7 @@
run_node.set_result(test_result, build_number)
run_node.set_desc(os.path.join(workspace, f))
- # Emit style sheet, script, and page header elements
- stem = os.path.splitext(os.path.abspath(__file__))[0]
- for tag, ext in [("style", "css"), ("script", "js")]:
- print(open_element(tag), file=fd)
- with open(os.extsep.join([stem, ext])) as ext_fd:
- shutil.copyfileobj(ext_fd, fd)
- print(close_element(tag), file=fd)
+ # Emit page header element
print(PAGE_HEADER, file=fd)
begin_table(results, fd)
@@ -481,27 +506,10 @@
with open(REPORT_JSON, "wt") as json_fd:
json.dump(json_obj, json_fd, indent=2)
-
-with open(REPORT, "wt") as fd:
- try:
- main(fd)
- except:
- # Upon error, create a static HTML reporting the error, and then raise
- # the latent exception again.
- fd.seek(0, io.SEEK_SET)
-
- # Provide inline style as there won't be a page header for us.
- err_style = (
- "border: 1px solid red;",
- "color: red;",
- "font-size: 30px;",
- "padding: 15px;"
- )
-
- print(make_element("div",
- "HTML report couldn't be prepared! Check job console.",
- style=" ".join(err_style)), file=fd)
-
- # Truncate file as we're disarding whatever there generated before.
- fd.truncate()
- raise
+if __name__ == "__main__":
+ with open(REPORT, "wt") as fd:
+ try:
+ main(fd)
+ except:
+ print_error_message(fd)
+ raise
diff --git a/script/graphs/README.rst b/script/graphs/README.rst
index 0860cb2..b057943 100644
--- a/script/graphs/README.rst
+++ b/script/graphs/README.rst
@@ -41,6 +41,66 @@
.. code-block::
- bash ../<this-repo>/script/graph/sloc-viz.bash > sloc.png 2> sloc.tsv
+ bash ../<this-repo>/script/graphs/sloc-viz.bash > sloc.png 2> sloc.tsv
+
+Test Results
+------------
+
+The script `tf-main-results.bash` uses curl to retrieve the test results for a
+tf-a-main Jenkins job and generates a CSV file and a stacked-histogram PNG of
+the combined data.
+
+Usage
+======
+
+.. code-block::
+
+    bash tf-main-results.bash <jenkins-url> [ci_gateway] [filter]
+
+The Jenkins URL is the URL of the target build of the job, for example:
+
+.. code-block::
+
+    https://ci.trustedfirmware.org/job/tf-a-main/1/
+
+All sub-builds of this job are queried to find the ones that contain tests;
+child builds that only compile and do not run any platform tests are ignored.
+
+`tf-a-ci-gateway` is the default gateway; a different gateway may be specified
+with the optional "ci_gateway" argument. This value is combined with the
+sub-build numbers and the base Jenkins URL to retrieve the results of the
+sub-builds.
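+
+As a sketch of what the script does under the hood, the per-build results are
+fetched with curl URL globbing (the build numbers below are illustrative):
+
+.. code-block::
+
+    curl -sSL "https://ci.trustedfirmware.org/{job/tf-a-ci-gateway/10/,job/tf-a-ci-gateway/11/}/api/json"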
+
+The results can be filtered further using the optional "filter" argument,
+which selects only the test groups that match the provided regex.
+
+Example Useful Queries
+======================
+
+Only show tests running the test framework:
+
+.. code-block::
+
+    bash tf-main-results.bash <jenkins-url> [ci_gateway] "tftf"
+
+Only show tests for N1SDP & Juno platforms:
+
+.. code-block::
+
+    bash tf-main-results.bash <jenkins-url> [ci_gateway] "n1sdp|juno"
+
+Only show boot tests on FVP platforms:
+
+.. code-block::
+
+    bash tf-main-results.bash <jenkins-url> [ci_gateway] 'fvp.*boot'
+
+Note: for filters that match only a few test groups the graph is not ideal,
+as it is sized for a large number of groups. A CSV file of the data is also
+produced, so you can use it to create your own graph if required.
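+
+For example, the bundled plot description can be re-run against the CSV
+directly (file names here are illustrative):
+
+.. code-block::
+
+    gnuplot -e "jenkins_id='<jenkins-url>'" -c tf-main-results.plot out.csv > out.png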
+
+Additional Config
+=================
+
+The script also allows the two output files to be configured via environment
+variables:
+
+.. code-block::
+
+    PNGFILE=out.png CSVFILE=out.csv bash tf-main-results.bash <jenkins-url>
+
+If they are not set, default values based on the target job name are used:
+
+.. code-block::
+
+    PNGFILE="${job_target}-result.png"
+    CSVFILE="${job_target}-result.csv"
+
+If any of these files already exist then they will be overwritten.
*Copyright (c) 2021-2022, Arm Limited. All rights reserved.*
diff --git a/script/graphs/tf-main-results.bash b/script/graphs/tf-main-results.bash
new file mode 100644
index 0000000..34ff885
--- /dev/null
+++ b/script/graphs/tf-main-results.bash
@@ -0,0 +1,197 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2022 Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+set -euo pipefail
+
+# Target build URL, gateway job name and an optional test filter.
+sub_build_url=${1}
+job_name="${2:-"tf-a-ci-gateway"}"
+filter=${3:-".*"}
+
+jenkins="${sub_build_url%%/job*}"
+
+# Use default paths for the output files if none are provided.
+job_target="$(dirname ${sub_build_url#*/job/})"
+PNGFILE=${PNGFILE:=${job_target}-result.png}
+CSVFILE=${CSVFILE:=${job_target}-result.csv}
+
+# Truncate the CSV output file so that results are appended to an empty file.
+: > "${CSVFILE}"
+
+readarray -t sub_builds < <(curl -sSL "${sub_build_url}/api/json" | jq -Rr '
+ fromjson? | [
+ .subBuilds[]? | select(.jobName == "'${job_name}'") | .url
+ ] | .[]')
+
+# Get, for each sub-build, the relative path to report.json, or "-" if there
+# is no report.json.
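+# curl expands the {a,b,...} brace list below, so a single request covers
+# every sub-build. Illustrative expanded URL:
+#   ${jenkins}/{job/tf-a-ci-gateway/10/,job/tf-a-ci-gateway/11/}/api/json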
+report_rel_paths_url="${jenkins}/{$(echo $(IFS=,; echo "${sub_builds[*]}"))}/api/json"
+
+readarray -t report_paths < <(curl -fsSL --fail-early "${report_rel_paths_url}" \
+ | sed 's/--_curl_--.*$//' \
+ | sed -e 's/^{/'$(printf "\x1e")'{/' \
+ | jq -sr --seq '
+ [ .[]
+ | [ .artifacts[]?
+ | select (.fileName == "report.json")
+ | .relativePath ]
+ | if length > 0 then .[] else "-" end ]
+ | .[]')
+
+# Combine each sub-build URL with the relative path to its "report.json".
+# Entries without a report are intentionally kept as "-" in ${report_paths}
+# so that the array maps one-to-one onto ${sub_builds}; they are skipped when
+# building the URL list.
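+# Illustrative result:
+#   ${jenkins}/{job/tf-a-ci-gateway/10/artifact/report.json,...}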
+report_urls="$jenkins/{"
+for i in "${!sub_builds[@]}"
+do
+ if [[ ${report_paths[i]} != "-" ]]
+ then
+ report_urls="${report_urls}${sub_builds[i]}/artifact/${report_paths[i]},"
+ fi
+done
+
+# Strip last comma and add closing brace
+report_urls="${report_urls%?}}"
+
+# Get Child build information from each report.json.
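+# Each report.json is assumed to provide a "job" name, a "child_build_numbers"
+# list and a "test_files" list whose entries look like
+# "<config>%<group>%<suite>.test" (illustrative); the jq below splits each
+# entry on "%" to recover the test group and suite for each child build.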
+readarray -t child_file_list_array < <(curl -sL "${report_urls}" -o -\
+ | sed 's/--_curl_--.*$//' \
+ | jq -sr --arg FILTER "${filter}" \
+ '[.[]
+ | .job as $job
+ | [ .child_build_numbers?, [(.test_files[]
+ | sub("\\.test";"")
+ | split("%") as $config
+ | { group: $config[1], suite: $config[2]})]]
+ | transpose
+ | map( {($job + "/" + .[0]) : .[1]} | to_entries )
+ | add
+  | map(select(.value.suite | test($FILTER; "il")?
+ and (endswith("nil")
+ or endswith("norun-fip.dummy") | not)))
+ | if ( length > 0 )
+ then .[] else empty end
+ | .value.group, (.value.suite | gsub("\\,nil";"")), .key]
+ | .[]')
+
+# These three arrays should be the same length, and values at the same index
+# correspond to the same child build
+declare -a tftf_keys tftf_suite tftf_group
+
+for i in $(seq 0 3 $((${#child_file_list_array[@]}-1))) ; do
+ tftf_group+=("${child_file_list_array[$i]}")
+ tftf_suite+=("${child_file_list_array[$i+1]}")
+ tftf_keys+=("${child_file_list_array[$i+2]}")
+done
+
+
+child_output_results_url="${jenkins}/job/{$(echo $(IFS=,; echo "${tftf_keys[*]}"))}/api/json"
+
+# Retrieve relative path to either "uart0_full.txt" (FVP) or
+# "job_output.log" (LAVA) for each child job. Once again values where no match
+# is found are intentionally kept as "-" so the array can be correlated with
+# ${tftf_suite}.
+readarray -t child_output_results < <(curl -fsSL --fail-early "$child_output_results_url" \
+ | sed 's/}{/}\n{/g' \
+ | jq -sr '[ .[]
+ | ([ .artifacts[]?
+ | select(.fileName == "uart0_full.txt"
+ or .fileName == "job_output.log"
+ or .fileName == "lava-uart0.log") ]
+ | if length > 0
+ then .[0].relativePath else "-" end), .result ]
+ | .[]')
+
+# Combine job and child_build number with relative path to output file
+testlog_urls="${jenkins}/job/{"
+tftf_child_results=()
+
+for i in $(seq 0 2 $((${#child_output_results[@]}-1))) ; do
+ testlog_urls+="${tftf_keys[$((i/2))]}/artifact/${child_output_results[$i]},"
+ tftf_child_results+=(${child_output_results[$((i+1))]})
+done
+
+# Remove final comma and append a closing brace
+testlog_urls="${testlog_urls%?}}"
+
+# Retrieve the log for each child with --include to also retrieve the HTTP
+# header and grep for a block like:
+# Tests Skipped : 125
+# Tests Passed : 45
+# Tests Failed : 0
+# Tests Crashed : 0
+#
+# If no such block is found, the HTTP header line is used to delimit each
+# entry.
+#
+# In logs from LAVA, each message is wrapped in braces and carries some
+# preamble, which is removed with sed.
+
+tftf_result_keys=(
+ "TestGroup" "TestSuite" "URL" "Result" "Passed" "Failed" "Crashed" "Skipped"
+)
+declare -A results_split="( $(for ord_ in ${tftf_result_keys[@]} ; do echo -n "[$ord_]=\"\" "; done))"
+declare output_csv_str="" csv_row=""
+
+read -ra tftf_urls <<< "$(eval "echo ${testlog_urls}")"
+
+# FIXME: adjust this so we can handle both FVP and LAVA log formats cleanly.
+# For each test suite:
+#   - curl the result log if its URL is not '-'
+#   - remove debug information
+#   - if no result block is found, fall back to the "Result" field
+#   - otherwise read each result key
+#   - write the row to the CSV
+
+# Collect the results into one CSV row per test suite:
+for i in ${!tftf_suite[*]}; do
+ results_split["TestGroup"]="${tftf_group[$i]:-}"
+ results_split["TestSuite"]="\"${tftf_suite[$i]:-}\""
+ results_split["URL"]="${tftf_urls[$i]:-}"
+ results_split["Result"]="${tftf_child_results[$i]:-}"
+
+ # Skipped/Crashed are always zero if no test block is found
+ results_split["Skipped"]="0"
+ results_split["Crashed"]="0"
+ if [[ "${results_split["Result"]}" == "SUCCESS" ]];
+ then
+ results_split["Passed"]="1"
+ results_split["Failed"]="0"
+ else
+ results_split["Passed"]="0"
+ results_split["Failed"]="1"
+ fi
+
+ readarray -t raw_result < <(curl -sL --include "${results_split["URL"]}" \
+ | sed 's/.*msg": "//g' \
+ | grep --text -E "^Tests|HTTP\/")
+
+ for line in "${raw_result[@]}"; do
+ if [[ "${line}" == Test* ]]
+ then
+ k=$(echo "${line}" | awk -F ' ' '{print $2}')
+ count="${line//[!0-9]/}"
+ results_split[$k]=$count
+ fi
+ done
+
+ # Generate CSV row using array of ordinals to align with headers.
+ readarray -t row < <(for k in ${tftf_result_keys[@]} ; do echo "${results_split[$k]}"; done )
+ output_csv_str="${output_csv_str} $(echo $(IFS=,; echo "${row[*]}"))"
+ unset results_split[{..}] row
+done
+
+# Join the header fields with commas and write them to the output file.
+echo $(IFS=,; echo "${tftf_result_keys[*]}") > ${CSVFILE}
+
+# Sort the rows alphabetically (by test group first) and append them to the
+# CSV for gnuplot.
+sorted=($(IFS=$' '; sort <<<$output_csv_str))
+printf "%b\n" "${sorted[@]}" >> ${CSVFILE}
+
+# Produce PNG image of graph using gnuplot and .plot description file
+gnuplot -e "jenkins_id='$sub_build_url'" -c ${0%bash}plot \
+ "$CSVFILE" > "$PNGFILE"
diff --git a/script/graphs/tf-main-results.plot b/script/graphs/tf-main-results.plot
new file mode 100644
index 0000000..3c8e345
--- /dev/null
+++ b/script/graphs/tf-main-results.plot
@@ -0,0 +1,58 @@
+#
+# Copyright (c) 2022 Arm Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Determine the set of test groups; the multiplot layout below is sized from
+# the number of groups (each group gets its own plot).
+groups=system(sprintf("awk -F ',' 'NR!=1 {print $1}' %s | sort | uniq", ARG1))
+group_count=words(groups)
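+# groups is a space-separated list of group names, e.g. "fvp juno n1sdp"
+# (group names illustrative).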
+
+# The terminal is sized for a large number of groups, so plots may look
+# sparse when only a few groups match the filter.
+set terminal pngcairo enhanced size 5400,9600 font ',14'
+
+set xrange [0:5<*] extend
+set ytic scale 0 nomirror
+set grid xtics
+set lmargin 70
+set bmargin 5
+set yrange [:] reverse
+set offsets 0,0,0.5,0.5
+set datafile separator ","
+
+set key autotitle columnhead
+
+# Create linetypes for coloured labels
+set linetype 1 linecolor "red"
+set linetype 2 linecolor "black"
+fill(n) = word("green red orange gray", n)
+
+set multiplot title "TF-A CI Test Results: " . jenkins_id \
+ font ",30" layout ceil(group_count/3.0),3
+
+ set style data histograms
+ set style fill solid 0.3 border -1
+ set key outside left vertical
+ set label "Test Suites" at screen 0.05,0.5 \
+ center front rotate font ",20"
+
+ do for [group in groups]{
+ set title group font ",18"
+ set style histogram rowstacked
+ filter = "awk -F, 'NR==1 || $1==\"".group."\"'"
+ col_count = 8
+ box_width = 0.5
+
+ plot for [col=5:col_count] '< '.filter.' '.ARG1 u col:0: \
+ (sum [i=5:col-1] column(i)): \
+ (sum [i=5:col] column(i)): \
+ ($0-box_width/2.):($0+box_width/2.):ytic(2) w boxxyerror \
+ ti columnhead(col) lc rgb fill(col-4)
+
+ unset key
+ }
+
+unset multiplot
+