New script for test outcome analysis
This is a new script that analyzes the test outcomes collected during a
full CI run.
This commit introduces the script, the code to read the outcome file,
and a very simple framework for reporting errors and warnings. It does
not perform any actual analysis yet.
Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com>
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
new file mode 100755
index 0000000..9d011db
--- /dev/null
+++ b/tests/scripts/analyze_outcomes.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+
+"""Analyze the test outcomes from a full CI run.
+
+This script can also run on outcomes from a partial run, but the results are
+less likely to be useful.
+"""
+
+import argparse
+import sys
+import traceback
+
+class Results:
+    """Accumulate and report errors and warnings from the analyses."""
+
+ def __init__(self):
+ self.error_count = 0
+ self.warning_count = 0
+
+ @staticmethod
+ def log(fmt, *args, **kwargs):
+ sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+
+ def error(self, fmt, *args, **kwargs):
+ self.log('Error: ' + fmt, *args, **kwargs)
+ self.error_count += 1
+
+ def warning(self, fmt, *args, **kwargs):
+ self.log('Warning: ' + fmt, *args, **kwargs)
+ self.warning_count += 1
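+
+# Hypothetical usage sketch: analyses are expected to report problems
+# through a Results object, e.g.
+#     results.error('test case not executed: {}', key)
+# writes "Error: test case not executed: <key>" to stderr and increments
+# results.error_count.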
+
+class TestCaseOutcomes:
+ """The outcomes of one test case across many configurations."""
+ # pylint: disable=too-few-public-methods
+
+ def __init__(self):
+ self.successes = []
+ self.failures = []
+
+ def hits(self):
+ """Return the number of times a test case has been run.
+
+ This includes passes and failures, but not skips.
+ """
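+        # For example, a test case that passed in two setups and failed in
+        # one has 3 hits; one that was skipped everywhere has 0 hits.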
+ return len(self.successes) + len(self.failures)
+
+def analyze_outcomes(outcomes):
+    """Run all analyses on the given outcome collection."""
+    # No analyses are implemented yet; this is a placeholder.
+    # pylint: disable=unused-argument
+    results = Results()
+    return results
+
+def read_outcome_file(outcome_file):
+    """Parse an outcome file and return an outcome collection.
+
+    An outcome collection is a dictionary mapping keys to TestCaseOutcomes
+    objects. The keys are the test suite name and the test case description,
+    separated by a semicolon.
+    """
+ outcomes = {}
+ with open(outcome_file, 'r', encoding='utf-8') as input_file:
+ for line in input_file:
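+            # Hypothetical example of an outcome line:
+            # Linux-x86_64;full;test_suite_rsa;RSA key generation;PASS;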
+            # Cap the split in case the cause field contains a ';'.
+            (platform, config, suite, case, result, _cause) = \
+                line.split(';', 5)
+ key = ';'.join([suite, case])
+ setup = ';'.join([platform, config])
+ if key not in outcomes:
+ outcomes[key] = TestCaseOutcomes()
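+            # Results other than PASS and FAIL (e.g. skipped test cases)
+            # are not recorded; see TestCaseOutcomes.hits().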
+ if result == 'PASS':
+ outcomes[key].successes.append(setup)
+ elif result == 'FAIL':
+ outcomes[key].failures.append(setup)
+ return outcomes
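+
+# Hypothetical illustration: after reading a file consisting only of the
+# example line above, outcomes['test_suite_rsa;RSA key generation'].hits()
+# would return 1 and .successes would be ['Linux-x86_64;full'].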
+
+def analyze_outcome_file(outcome_file):
+ """Analyze the given outcome file."""
+ outcomes = read_outcome_file(outcome_file)
+ return analyze_outcomes(outcomes)
+
+def main():
+ try:
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
+ help='Outcome file to analyze')
+ options = parser.parse_args()
+ results = analyze_outcome_file(options.outcomes)
+ if results.error_count > 0:
+ sys.exit(1)
+ except Exception: # pylint: disable=broad-except
+ # Print the backtrace and exit explicitly with our chosen status.
+ traceback.print_exc()
+ sys.exit(120)
+
+if __name__ == '__main__':
+ main()