Merge pull request #8346 from yanrayw/issue8314/accel_cipher_cmac
Driver status of CMAC: accelerate CMAC in accel_cipher
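The analyze_outcomes.py changes below rework the Results helper from a static logger into per-instance reporting, so one Results object accumulates warning and error counts across every task and ultimately decides the exit status. For orientation, here is a condensed sketch of the driver-vs-reference check the script performs; compare_ref_vs_driver is a hypothetical name used only for illustration, and the real analyze_driver_vs_reference additionally skips unexecuted test cases, ignored suites and ignored tests:

    # Condensed sketch, not the patch itself: a test case is reported as an
    # error when the reference component passed it but the driver
    # (accelerated) component did not. `outcomes` maps a test-case key to an
    # object with `successes`/`failures` lists of setups, as in the script.
    def compare_ref_vs_driver(results, outcomes, component_ref, component_driver):
        for key, outcome in outcomes.items():
            driver_passed = any(component_driver in entry
                                for entry in outcome.successes)
            reference_passed = any(component_ref in entry
                                   for entry in outcome.successes)
            if reference_passed and not driver_passed:
                results.error("Did not pass with driver: {}", key)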
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
index 1f20734..9254331 100755
--- a/tests/scripts/analyze_outcomes.py
+++ b/tests/scripts/analyze_outcomes.py
@@ -22,17 +22,23 @@
self.error_count = 0
self.warning_count = 0
- @staticmethod
- def log(fmt, *args, **kwargs):
- sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+ def new_section(self, fmt, *args, **kwargs):
+ self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)
+
+ def info(self, fmt, *args, **kwargs):
+ self._print_line('Info: ' + fmt, *args, **kwargs)
def error(self, fmt, *args, **kwargs):
- self.log('Error: ' + fmt, *args, **kwargs)
self.error_count += 1
+ self._print_line('Error: ' + fmt, *args, **kwargs)
def warning(self, fmt, *args, **kwargs):
- self.log('Warning: ' + fmt, *args, **kwargs)
self.warning_count += 1
+ self._print_line('Warning: ' + fmt, *args, **kwargs)
+
+ @staticmethod
+ def _print_line(fmt, *args, **kwargs):
+ sys.stderr.write((fmt + '\n').format(*args, **kwargs))
class TestCaseOutcomes:
"""The outcomes of one test case across many configurations."""
@@ -53,25 +59,24 @@
"""
return len(self.successes) + len(self.failures)
-def execute_reference_driver_tests(ref_component, driver_component, outcome_file):
+def execute_reference_driver_tests(results: Results, ref_component, driver_component,
+ outcome_file):
"""Run the tests specified in ref_component and driver_component. Results
are stored in the output_file and they will be used for the following
coverage analysis"""
# If the outcome file already exists, we assume that the user wants to
# perform the comparison analysis again without repeating the tests.
if os.path.exists(outcome_file):
- Results.log("Outcome file (" + outcome_file + ") already exists. " + \
- "Tests will be skipped.")
+ results.info("Outcome file ({}) already exists. Tests will be skipped.", outcome_file)
return
shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
" " + ref_component + " " + driver_component
- Results.log("Running: " + shell_command)
+ results.info("Running: {}", shell_command)
ret_val = subprocess.run(shell_command.split(), check=False).returncode
if ret_val != 0:
- Results.log("Error: failed to run reference/driver components")
- sys.exit(ret_val)
+ results.error("failed to run reference/driver components")
def analyze_coverage(results, outcomes, allow_list, full_coverage):
"""Check that all available test cases are executed at least once."""
@@ -90,7 +95,8 @@
else:
results.warning('Allow listed test case was executed: {}', key)
-def analyze_driver_vs_reference(outcomes, component_ref, component_driver,
+def analyze_driver_vs_reference(results: Results, outcomes,
+ component_ref, component_driver,
ignored_suites, ignored_test=None):
"""Check that all tests executed in the reference component are also
executed in the corresponding driver component.
@@ -100,7 +106,6 @@
output string is provided
"""
available = check_test_cases.collect_available_test_cases()
- result = True
for key in available:
# Continue if test was not executed by any component
@@ -125,16 +130,12 @@
if component_ref in entry:
reference_test_passed = True
        if reference_test_passed and not driver_test_passed:
- Results.log(key)
- result = False
- return result
+ results.error("Did not pass with driver: {}", key)
-def analyze_outcomes(outcomes, args):
+def analyze_outcomes(results: Results, outcomes, args):
"""Run all analyses on the given outcome collection."""
- results = Results()
analyze_coverage(results, outcomes, args['allow_list'],
args['full_coverage'])
- return results
def read_outcome_file(outcome_file):
"""Parse an outcome file and return an outcome collection.
@@ -157,29 +158,30 @@
outcomes[key].failures.append(setup)
return outcomes
-def do_analyze_coverage(outcome_file, args):
+def do_analyze_coverage(results: Results, outcome_file, args):
"""Perform coverage analysis."""
+ results.new_section("Analyze coverage")
outcomes = read_outcome_file(outcome_file)
- Results.log("\n*** Analyze coverage ***\n")
- results = analyze_outcomes(outcomes, args)
- return results.error_count == 0
+ analyze_outcomes(results, outcomes, args)
-def do_analyze_driver_vs_reference(outcome_file, args):
+def do_analyze_driver_vs_reference(results: Results, outcome_file, args):
"""Perform driver vs reference analyze."""
- execute_reference_driver_tests(args['component_ref'], \
- args['component_driver'], outcome_file)
+ results.new_section("Analyze driver {} vs reference {}",
+ args['component_driver'], args['component_ref'])
+
+    execute_reference_driver_tests(results, args['component_ref'],
+ args['component_driver'], outcome_file)
ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
outcomes = read_outcome_file(outcome_file)
- Results.log("\n*** Analyze driver {} vs reference {} ***\n".format(
- args['component_driver'], args['component_ref']))
- return analyze_driver_vs_reference(outcomes, args['component_ref'],
- args['component_driver'], ignored_suites,
- args['ignored_tests'])
+
+ analyze_driver_vs_reference(results, outcomes,
+ args['component_ref'], args['component_driver'],
+ ignored_suites, args['ignored_tests'])
# Map of known tasks to the function that handles each one, plus any additional arguments it requires
-TASKS = {
+KNOWN_TASKS = {
'analyze_coverage': {
'test_function': do_analyze_coverage,
'args': {
@@ -641,11 +643,13 @@
}
def main():
+ main_results = Results()
+
try:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
help='Outcome file to analyze')
- parser.add_argument('task', default='all', nargs='?',
+ parser.add_argument('specified_tasks', default='all', nargs='?',
help='Analysis to be done. By default, run all tasks. '
'With one or more TASK, run only those. '
'TASK can be the name of a single task or '
@@ -660,33 +664,31 @@
options = parser.parse_args()
if options.list:
- for task in TASKS:
- Results.log(task)
+ for task in KNOWN_TASKS:
+ print(task)
sys.exit(0)
- result = True
-
- if options.task == 'all':
- tasks = TASKS.keys()
+ if options.specified_tasks == 'all':
+ tasks_list = KNOWN_TASKS.keys()
else:
- tasks = re.split(r'[, ]+', options.task)
+ tasks_list = re.split(r'[, ]+', options.specified_tasks)
+ for task in tasks_list:
+ if task not in KNOWN_TASKS:
+                sys.stderr.write('invalid task: {}\n'.format(task))
+ sys.exit(2)
- for task in tasks:
- if task not in TASKS:
- Results.log('Error: invalid task: {}'.format(task))
- sys.exit(1)
+ KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
- TASKS['analyze_coverage']['args']['full_coverage'] = \
- options.full_coverage
+ for task in tasks_list:
+ test_function = KNOWN_TASKS[task]['test_function']
+ test_args = KNOWN_TASKS[task]['args']
+ test_function(main_results, options.outcomes, test_args)
- for task in TASKS:
- if task in tasks:
- if not TASKS[task]['test_function'](options.outcomes, TASKS[task]['args']):
- result = False
+ main_results.info("Overall results: {} warnings and {} errors",
+ main_results.warning_count, main_results.error_count)
- if result is False:
- sys.exit(1)
- Results.log("SUCCESS :-)")
+ sys.exit(0 if (main_results.error_count == 0) else 1)
+
except Exception: # pylint: disable=broad-except
# Print the backtrace and exit explicitly with our chosen status.
traceback.print_exc()
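Taken together, analyze_outcomes.py now follows a plain dispatch pattern: each KNOWN_TASKS entry pairs a handler with its arguments, every handler reports through the shared Results instance, and main() turns the accumulated error count into the exit code. A self-contained sketch of that flow, assuming a reduced Results stand-in and a hypothetical run_tasks wrapper for what main() does inline:

    # Standalone sketch of the reworked flow; names mirror the patch, but the
    # handler body and the Results class here are reduced stand-ins.
    import re
    import sys

    class Results:
        """Minimal stand-in for the script's Results reporter."""
        def __init__(self):
            self.error_count = 0
            self.warning_count = 0
        def new_section(self, fmt, *args, **kwargs):
            sys.stderr.write('\n*** ' + fmt.format(*args, **kwargs) + ' ***\n\n')
        def error(self, fmt, *args, **kwargs):
            self.error_count += 1
            sys.stderr.write('Error: ' + fmt.format(*args, **kwargs) + '\n')

    def do_analyze_coverage(results, outcome_file, args):
        results.new_section("Analyze coverage")
        # ... read outcome_file and report findings via results.error() ...

    KNOWN_TASKS = {
        'analyze_coverage': {
            'test_function': do_analyze_coverage,
            'args': {'allow_list': [], 'full_coverage': False},
        },
    }

    def run_tasks(specified_tasks, outcome_file, results):
        # Resolve the task list, rejecting unknown names up front (exit 2),
        # then dispatch each handler and derive the exit code from the
        # accumulated error count.
        tasks_list = (KNOWN_TASKS.keys() if specified_tasks == 'all'
                      else re.split(r'[, ]+', specified_tasks))
        for task in tasks_list:
            if task not in KNOWN_TASKS:
                sys.stderr.write('invalid task: {}\n'.format(task))
                sys.exit(2)
        for task in tasks_list:
            KNOWN_TASKS[task]['test_function'](results, outcome_file,
                                               KNOWN_TASKS[task]['args'])
        sys.exit(0 if results.error_count == 0 else 1)

    run_tasks('all', 'outcomes.csv', Results())  # illustrative outcome file name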
diff --git a/tests/ssl-opt.sh b/tests/ssl-opt.sh
index 0dcff67..51d59bb 100755
--- a/tests/ssl-opt.sh
+++ b/tests/ssl-opt.sh
@@ -2572,32 +2572,32 @@
run_test_psa TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256
run_test_psa TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384
-requires_config_enabled MBEDTLS_ECP_DP_SECP521R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_R1_521
run_test_psa_force_curve "secp521r1"
-requires_config_enabled MBEDTLS_ECP_DP_BP512R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_BRAINPOOL_P_R1_512
run_test_psa_force_curve "brainpoolP512r1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP384R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_R1_384
run_test_psa_force_curve "secp384r1"
-requires_config_enabled MBEDTLS_ECP_DP_BP384R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_BRAINPOOL_P_R1_384
run_test_psa_force_curve "brainpoolP384r1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP256R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_R1_256
run_test_psa_force_curve "secp256r1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP256K1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_K1_256
run_test_psa_force_curve "secp256k1"
-requires_config_enabled MBEDTLS_ECP_DP_BP256R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_BRAINPOOL_P_R1_256
run_test_psa_force_curve "brainpoolP256r1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP224R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_R1_224
run_test_psa_force_curve "secp224r1"
## SECP224K1 is buggy via the PSA API
## (https://github.com/Mbed-TLS/mbedtls/issues/3541),
## so it is disabled in PSA even when it's enabled in Mbed TLS.
## The proper dependency would be on PSA_WANT_ECC_SECP_K1_224; it is kept
## commented out below until that bug is fixed.
-#requires_config_enabled MBEDTLS_ECP_DP_SECP224K1_ENABLED
+#requires_config_enabled PSA_WANT_ECC_SECP_K1_224
#run_test_psa_force_curve "secp224k1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP192R1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_R1_192
run_test_psa_force_curve "secp192r1"
-requires_config_enabled MBEDTLS_ECP_DP_SECP192K1_ENABLED
+requires_config_enabled PSA_WANT_ECC_SECP_K1_192
run_test_psa_force_curve "secp192k1"
# Test current time in ServerHello
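The ssl-opt.sh hunk above re-keys the forced-curve tests from legacy MBEDTLS_ECP_DP_*_ENABLED options to the corresponding PSA_WANT_ECC_* symbols, so each dependency tracks what the PSA crypto configuration actually enables. The renaming is mechanical; as a summary, the pairs below are taken verbatim from the hunk (the lookup table itself is illustrative, not part of the patch):

    # Legacy curve option -> PSA_WANT symbol, as renamed in the hunk above.
    LEGACY_TO_PSA = {
        'MBEDTLS_ECP_DP_SECP521R1_ENABLED': 'PSA_WANT_ECC_SECP_R1_521',
        'MBEDTLS_ECP_DP_BP512R1_ENABLED':   'PSA_WANT_ECC_BRAINPOOL_P_R1_512',
        'MBEDTLS_ECP_DP_SECP384R1_ENABLED': 'PSA_WANT_ECC_SECP_R1_384',
        'MBEDTLS_ECP_DP_BP384R1_ENABLED':   'PSA_WANT_ECC_BRAINPOOL_P_R1_384',
        'MBEDTLS_ECP_DP_SECP256R1_ENABLED': 'PSA_WANT_ECC_SECP_R1_256',
        'MBEDTLS_ECP_DP_SECP256K1_ENABLED': 'PSA_WANT_ECC_SECP_K1_256',
        'MBEDTLS_ECP_DP_BP256R1_ENABLED':   'PSA_WANT_ECC_BRAINPOOL_P_R1_256',
        'MBEDTLS_ECP_DP_SECP224R1_ENABLED': 'PSA_WANT_ECC_SECP_R1_224',
        # SECP224K1 -> PSA_WANT_ECC_SECP_K1_224 stays commented out in
        # ssl-opt.sh because of the secp224k1 PSA bug (issue #3541).
        'MBEDTLS_ECP_DP_SECP192R1_ENABLED': 'PSA_WANT_ECC_SECP_R1_192',
        'MBEDTLS_ECP_DP_SECP192K1_ENABLED': 'PSA_WANT_ECC_SECP_K1_192',
    }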