Merge upstream 3.0 from 'development' in ARMmbed/mbedtls
Signed-off-by: Yuto Takano <yuto.takano@arm.com>
diff --git a/tests/scripts/all-in-docker.sh b/tests/scripts/all-in-docker.sh
new file mode 100755
index 0000000..8c9ff47
--- /dev/null
+++ b/tests/scripts/all-in-docker.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+
+# all-in-docker.sh
+#
+# Purpose
+# -------
+# This runs all.sh (except for armcc) in a Docker container.
+#
+# Notes for users
+# ---------------
+# See docker_env.sh for prerequisites and other information.
+#
+# See also all.sh for notes about invocation of that script.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tests/scripts/docker_env.sh
+
+# Run tests that are possible with openly available compilers
+run_in_docker tests/scripts/all.sh \
+ --no-armcc \
+    "$@"
diff --git a/tests/scripts/all.sh b/tests/scripts/all.sh
index 851287f..efcb554 100755
--- a/tests/scripts/all.sh
+++ b/tests/scripts/all.sh
@@ -2,9 +2,20 @@
# all.sh
#
-# This file is part of mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2014-2017, ARM Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
@@ -23,8 +34,9 @@
# Warning: the test is destructive. It includes various build modes and
# configurations, and can and will arbitrarily change the current CMake
# configuration. The following files must be committed into git:
-# * include/mbedtls/config.h
-# * Makefile, library/Makefile, programs/Makefile, tests/Makefile
+# * include/mbedtls/mbedtls_config.h
+# * Makefile, library/Makefile, programs/Makefile, tests/Makefile,
+# programs/fuzz/Makefile
# After running this script, the CMake cache will be lost and CMake
# will no longer be initialised.
#
@@ -35,13 +47,13 @@
# * GNU Make
# * CMake
# * GCC and Clang (recent enough for using ASan with gcc and MemSan with clang, or valgrind)
+# * G++
# * arm-gcc and mingw-gcc
# * ArmCC 5 and ArmCC 6, unless invoked with --no-armcc
-# * Yotta build dependencies, unless invoked with --no-yotta
# * OpenSSL and GnuTLS command line tools, recent enough for the
-# interoperability tests. If they don't support SSLv3 then a legacy
-# version of these tools must be present as well (search for LEGACY
-# below).
+# interoperability tests. If they don't support old features which we want
+# to test, then a legacy version of these tools must be present as well
+# (search for LEGACY below).
# See the invocation of check_tools below for details.
#
# This script must be invoked from the toplevel directory of a git
@@ -55,21 +67,46 @@
# Notes for maintainers
# ---------------------
#
+# The bulk of the code is organized into functions that follow one of the
+# following naming conventions:
+# * pre_XXX: things to do before running the tests, in order.
+# * component_XXX: independent components. They can be run in any order.
+# * component_check_XXX: quick tests that aren't worth parallelizing.
+# * component_build_XXX: build things but don't run them.
+# * component_test_XXX: build and test.
+# * support_XXX: if support_XXX exists and returns false then
+# component_XXX is not run by default.
+# * post_XXX: things to do after running the tests.
+# * other: miscellaneous support functions.
+#
+# Each component must start by invoking `msg` with a short informative message.
+#
+# The framework performs some cleanup tasks after each component. This
+# means that components can assume that the working directory is in a
+# cleaned-up state, and don't need to perform the cleanup themselves.
+# * Run `make clean`.
+# * Restore `include/mbedtls/mbedtls_config.h` from a backup made before running
+# the component.
+# * Check out `Makefile`, `library/Makefile`, `programs/Makefile`,
+# `tests/Makefile` and `programs/fuzz/Makefile` from git.
+# This cleans up after an in-tree use of CMake.
+#
+# Any command that is expected to fail must be protected so that the
+# script keeps running in --keep-going mode despite `set -e`. In keep-going
+# mode, if a protected command fails, this is logged as a failure and the
+# script will exit with a failure status once it has run all components.
+# Commands can be protected in any of the following ways:
+# * `make` is a function which runs the `make` command with protection.
+# Note that you must write `make VAR=value`, not `VAR=value make`,
+# because the `VAR=value make` syntax doesn't work with functions.
+# * Put `report_status` before the command to protect it.
+# * Put `if_build_successful` before a command. This protects it, and
+# additionally skips it if a prior invocation of `make` in the same
+# component failed.
+#
# The tests are roughly in order from fastest to slowest. This doesn't
# have to be exact, but in general you should add slower tests towards
# the end and fast checks near the beginning.
-#
-# Sanity checks have the following form:
-# 1. msg "short description of what is about to be done"
-# 2. run sanity check (failure stops the script)
-#
-# Build or build-and-test steps have the following form:
-# 1. msg "short description of what is about to be done"
-# 2. cleanup
-# 3. preparation (config.pl, cmake, ...) (failure stops the script)
-# 4. make
-# 5. Run tests if relevant. All tests must be prefixed with
-# if_build_successful for the sake of --keep-going.
@@ -80,60 +117,134 @@
# Abort on errors (and uninitialised variables)
set -eu
-if [ "$( uname )" != "Linux" ]; then
- echo "This script only works in Linux" >&2
- exit 1
-elif [ -d library -a -d include -a -d tests ]; then :; else
- echo "Must be run from mbed TLS root" >&2
- exit 1
-fi
+pre_check_environment () {
+ if [ -d library -a -d include -a -d tests ]; then :; else
+ echo "Must be run from mbed TLS root" >&2
+ exit 1
+ fi
+}
-CONFIG_H='include/mbedtls/config.h'
-CONFIG_BAK="$CONFIG_H.bak"
+pre_initialize_variables () {
+ CONFIG_H='include/mbedtls/mbedtls_config.h'
+ CONFIG_BAK="$CONFIG_H.bak"
+ CRYPTO_CONFIG_H='include/psa/crypto_config.h'
+ CRYPTO_CONFIG_BAK="$CRYPTO_CONFIG_H.bak"
-MEMORY=0
-FORCE=0
-KEEP_GOING=0
-RUN_ARMCC=1
-YOTTA=1
+ append_outcome=0
+ MEMORY=0
+ FORCE=0
+ QUIET=0
+ KEEP_GOING=0
-# Default commands, can be overriden by the environment
-: ${OPENSSL:="openssl"}
-: ${OPENSSL_LEGACY:="$OPENSSL"}
-: ${GNUTLS_CLI:="gnutls-cli"}
-: ${GNUTLS_SERV:="gnutls-serv"}
-: ${GNUTLS_LEGACY_CLI:="$GNUTLS_CLI"}
-: ${GNUTLS_LEGACY_SERV:="$GNUTLS_SERV"}
-: ${OUT_OF_SOURCE_DIR:=./mbedtls_out_of_source_build}
-: ${ARMC5_BIN_DIR:=/usr/bin}
-: ${ARMC6_BIN_DIR:=/usr/bin}
+ # Seed value used with the --release-test option.
+ #
+ # See also RELEASE_SEED in basic-build-test.sh. Debugging is easier if
+ # both values are kept in sync. If you change the value here because it
+ # breaks some tests, you'll definitely want to change it in
+ # basic-build-test.sh as well.
+ RELEASE_SEED=1
-# if MAKEFLAGS is not set add the -j option to speed up invocations of make
-if [ -n "${MAKEFLAGS+set}" ]; then
- export MAKEFLAGS="-j"
-fi
+ : ${MBEDTLS_TEST_OUTCOME_FILE=}
+ : ${MBEDTLS_TEST_PLATFORM="$(uname -s | tr -c \\n0-9A-Za-z _)-$(uname -m | tr -c \\n0-9A-Za-z _)"}
+ export MBEDTLS_TEST_OUTCOME_FILE
+ export MBEDTLS_TEST_PLATFORM
+
+ # Default commands, can be overridden by the environment
+ : ${OPENSSL:="openssl"}
+ : ${OPENSSL_LEGACY:="$OPENSSL"}
+ : ${OPENSSL_NEXT:="$OPENSSL"}
+ : ${GNUTLS_CLI:="gnutls-cli"}
+ : ${GNUTLS_SERV:="gnutls-serv"}
+ : ${GNUTLS_LEGACY_CLI:="$GNUTLS_CLI"}
+ : ${GNUTLS_LEGACY_SERV:="$GNUTLS_SERV"}
+ : ${OUT_OF_SOURCE_DIR:=./mbedtls_out_of_source_build}
+ : ${ARMC5_BIN_DIR:=/usr/bin}
+ : ${ARMC6_BIN_DIR:=/usr/bin}
+ : ${ARM_NONE_EABI_GCC_PREFIX:=arm-none-eabi-}
+
+ # if MAKEFLAGS is not set add the -j option to speed up invocations of make
+ if [ -z "${MAKEFLAGS+set}" ]; then
+ export MAKEFLAGS="-j"
+ fi
+
+ # Include more verbose output for failing tests run by CMake
+ export CTEST_OUTPUT_ON_FAILURE=1
+
+ # CFLAGS and LDFLAGS for Asan builds that don't use CMake
+ ASAN_CFLAGS='-Werror -Wall -Wextra -fsanitize=address,undefined -fno-sanitize-recover=all'
+
+ # Gather the list of available components. These are the functions
+ # defined in this script whose name starts with "component_".
+ # Parse the script with sed, because in sh there is no way to list
+ # defined functions.
+ ALL_COMPONENTS=$(sed -n 's/^ *component_\([0-9A-Z_a-z]*\) *().*/\1/p' <"$0")
+
+ # Exclude components that are not supported on this platform.
+ SUPPORTED_COMPONENTS=
+ for component in $ALL_COMPONENTS; do
+ case $(type "support_$component" 2>&1) in
+ *' function'*)
+ if ! support_$component; then continue; fi;;
+ esac
+ SUPPORTED_COMPONENTS="$SUPPORTED_COMPONENTS $component"
+ done
+}
+
+# Test whether the component $1 is included in the command line patterns.
+is_component_included()
+{
+ set -f
+ for pattern in $COMMAND_LINE_COMPONENTS; do
+ set +f
+ case ${1#component_} in $pattern) return 0;; esac
+ done
+ set +f
+ return 1
+}
usage()
{
cat <<EOF
-Usage: $0 [OPTION]...
- -h|--help Print this help.
+Usage: $0 [OPTION]... [COMPONENT]...
+Run mbedtls release validation tests.
+By default, run all tests. With one or more COMPONENT, run only those.
+COMPONENT can be the name of a component or a shell wildcard pattern.
+
+Examples:
+ $0 "check_*"
+ Run all sanity checks.
+ $0 --no-armcc --except test_memsan
+ Run everything except builds that require armcc and MemSan.
+
+Special options:
+ -h|--help Print this help and exit.
+ --list-all-components List all available test components and exit.
+ --list-components List components supported on this platform and exit.
General options:
+ -q|--quiet Only output component names, and errors if any.
-f|--force Force the tests to overwrite any modified files.
-k|--keep-going Run all tests and report errors at the end.
-m|--memory Additional optional memory tests.
+ --append-outcome Append to the outcome file (if used).
+ --arm-none-eabi-gcc-prefix=<string>
+ Prefix for a cross-compiler for arm-none-eabi
+ (default: "${ARM_NONE_EABI_GCC_PREFIX}")
--armcc Run ARM Compiler builds (on by default).
+ --except Exclude the COMPONENTs listed on the command line,
+ instead of running only those.
+ --no-append-outcome Write a new outcome file and analyze it (default).
--no-armcc Skip ARM Compiler builds.
--no-force Refuse to overwrite modified files (default).
--no-keep-going Stop at the first error (default).
--no-memory No additional memory tests (default).
- --no-yotta Skip yotta module build.
+     --no-quiet             Print full output from components.
--out-of-source-dir=<path> Directory used for CMake out-of-source build tests.
+ --outcome-file=<path> File where test outcomes are written (not done if
+ empty; default: \$MBEDTLS_TEST_OUTCOME_FILE).
--random-seed Use a random seed value for randomized tests (default).
- -r|--release-test Run this script in release mode. This fixes the seed value to 1.
+ -r|--release-test Run this script in release mode. This fixes the seed value to ${RELEASE_SEED}.
-s|--seed Integer seed value to use for this test run.
- --yotta Build yotta module (on by default).
Tool path options:
--armc5-bin-dir=<ARMC5_bin_dir_path> ARM Compiler 5 bin directory.
@@ -143,23 +254,53 @@
--gnutls-legacy-cli=<GnuTLS_cli_path> GnuTLS client executable to use for legacy tests.
--gnutls-legacy-serv=<GnuTLS_serv_path> GnuTLS server executable to use for legacy tests.
--openssl=<OpenSSL_path> OpenSSL executable to use for most tests.
- --openssl-legacy=<OpenSSL_path> OpenSSL executable to use for legacy tests e.g. SSLv3.
+     --openssl-legacy=<OpenSSL_path>         OpenSSL executable to use for legacy tests.
+ --openssl-next=<OpenSSL_path> OpenSSL executable to use for recent things like ARIA
EOF
}
# remove built files as well as the cmake cache/config
cleanup()
{
+ if [ -n "${MBEDTLS_ROOT_DIR+set}" ]; then
+ cd "$MBEDTLS_ROOT_DIR"
+ fi
+
command make clean
- find . -name yotta -prune -o -iname '*cmake*' -not -name CMakeLists.txt -exec rm -rf {} \+
+ # Remove CMake artefacts
+ find . -name .git -prune -o \
+ -iname CMakeFiles -exec rm -rf {} \+ -o \
+ \( -iname cmake_install.cmake -o \
+ -iname CTestTestfile.cmake -o \
+ -iname CMakeCache.txt \) -exec rm -f {} \+
+ # Recover files overwritten by in-tree CMake builds
rm -f include/Makefile include/mbedtls/Makefile programs/*/Makefile
- git update-index --no-skip-worktree Makefile library/Makefile programs/Makefile tests/Makefile
- git checkout -- Makefile library/Makefile programs/Makefile tests/Makefile
+ git update-index --no-skip-worktree Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+ git checkout -- Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+
+ # Remove any artifacts from the component_test_cmake_as_subdirectory test.
+ rm -rf programs/test/cmake_subproject/build
+ rm -f programs/test/cmake_subproject/Makefile
+ rm -f programs/test/cmake_subproject/cmake_subproject
+
+ # Remove any artifacts from the component_test_cmake_as_package test.
+ rm -rf programs/test/cmake_package/build
+ rm -f programs/test/cmake_package/Makefile
+ rm -f programs/test/cmake_package/cmake_package
+
+ # Remove any artifacts from the component_test_cmake_as_installed_package test.
+ rm -rf programs/test/cmake_package_install/build
+ rm -f programs/test/cmake_package_install/Makefile
+ rm -f programs/test/cmake_package_install/cmake_package_install
if [ -f "$CONFIG_BAK" ]; then
mv "$CONFIG_BAK" "$CONFIG_H"
fi
+
+ if [ -f "$CRYPTO_CONFIG_BAK" ]; then
+ mv "$CRYPTO_CONFIG_BAK" "$CRYPTO_CONFIG_H"
+ fi
}
# Executed on exit. May be redefined depending on command line options.
@@ -180,25 +321,36 @@
msg()
{
+ if [ -n "${current_component:-}" ]; then
+ current_section="${current_component#component_}: $1"
+ else
+ current_section="$1"
+ fi
+
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
echo ""
echo "******************************************************************"
- echo "* $1 "
+ echo "* $current_section "
printf "* "; date
echo "******************************************************************"
- current_section=$1
}
-if [ $RUN_ARMCC -ne 0 ]; then
- armc6_build_test()
- {
- FLAGS="$1"
+armc6_build_test()
+{
+ FLAGS="$1"
- msg "build: ARM Compiler 6 ($FLAGS), make"
- ARM_TOOL_VARIANT="ult" CC="$ARMC6_CC" AR="$ARMC6_AR" CFLAGS="$FLAGS" \
- WARNING_CFLAGS='-xc -std=c99' make lib
- make clean
- }
-fi
+ msg "build: ARM Compiler 6 ($FLAGS)"
+ ARM_TOOL_VARIANT="ult" CC="$ARMC6_CC" AR="$ARMC6_AR" CFLAGS="$FLAGS" \
+ WARNING_CFLAGS='-xc -std=c99' make lib
+
+ msg "size: ARM Compiler 6 ($FLAGS)"
+ "$ARMC6_FROMELF" -z library/*.o
+
+ make clean
+}
err_msg()
{
@@ -208,79 +360,119 @@
check_tools()
{
for TOOL in "$@"; do
- if ! `hash "$TOOL" >/dev/null 2>&1`; then
+ if ! `type "$TOOL" >/dev/null 2>&1`; then
err_msg "$TOOL not found!"
exit 1
fi
done
}
-while [ $# -gt 0 ]; do
- case "$1" in
- --armcc) RUN_ARMCC=1;;
- --armc5-bin-dir) shift; ARMC5_BIN_DIR="$1";;
- --armc6-bin-dir) shift; ARMC6_BIN_DIR="$1";;
- --force|-f) FORCE=1;;
- --gnutls-cli) shift; GNUTLS_CLI="$1";;
- --gnutls-legacy-cli) shift; GNUTLS_LEGACY_CLI="$1";;
- --gnutls-legacy-serv) shift; GNUTLS_LEGACY_SERV="$1";;
- --gnutls-serv) shift; GNUTLS_SERV="$1";;
- --help|-h) usage; exit;;
- --keep-going|-k) KEEP_GOING=1;;
- --memory|-m) MEMORY=1;;
- --no-armcc) RUN_ARMCC=0;;
- --no-force) FORCE=0;;
- --no-keep-going) KEEP_GOING=0;;
- --no-memory) MEMORY=0;;
- --no-yotta) YOTTA=0;;
- --openssl) shift; OPENSSL="$1";;
- --openssl-legacy) shift; OPENSSL_LEGACY="$1";;
- --out-of-source-dir) shift; OUT_OF_SOURCE_DIR="$1";;
- --random-seed) unset SEED;;
- --release-test|-r) SEED=1;;
- --seed|-s) shift; SEED="$1";;
- --yotta) YOTTA=1;;
- *)
- echo >&2 "Unknown option: $1"
- echo >&2 "Run $0 --help for usage."
- exit 120
- ;;
- esac
- shift
-done
+check_headers_in_cpp () {
+ ls include/mbedtls | grep "\.h$" >headers.txt
+ <programs/test/cpp_dummy_build.cpp sed -n 's/"$//; s!^#include "mbedtls/!!p' |
+ sort |
+ diff headers.txt -
+ rm headers.txt
+}
-if [ $FORCE -eq 1 ]; then
- if [ $YOTTA -eq 1 ]; then
- rm -rf yotta/module "$OUT_OF_SOURCE_DIR"
- fi
- git checkout-index -f -q $CONFIG_H
- cleanup
-else
+pre_parse_command_line () {
+ COMMAND_LINE_COMPONENTS=
+ all_except=0
+ no_armcc=
- if [ $YOTTA -ne 0 ] && [ -d yotta/module ]; then
- err_msg "Warning - there is an existing yotta module in the directory 'yotta/module'"
- echo "You can either delete your work and retry, or force the test to overwrite the"
- echo "test by rerunning the script as: $0 --force"
- exit 1
+ # Note that legacy options are ignored instead of being omitted from this
+ # list of options, so invocations that worked with previous version of
+ # all.sh will still run and work properly.
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ --append-outcome) append_outcome=1;;
+ --arm-none-eabi-gcc-prefix) shift; ARM_NONE_EABI_GCC_PREFIX="$1";;
+ --armcc) no_armcc=;;
+ --armc5-bin-dir) shift; ARMC5_BIN_DIR="$1";;
+ --armc6-bin-dir) shift; ARMC6_BIN_DIR="$1";;
+ --except) all_except=1;;
+ --force|-f) FORCE=1;;
+ --gnutls-cli) shift; GNUTLS_CLI="$1";;
+ --gnutls-legacy-cli) shift; GNUTLS_LEGACY_CLI="$1";;
+ --gnutls-legacy-serv) shift; GNUTLS_LEGACY_SERV="$1";;
+ --gnutls-serv) shift; GNUTLS_SERV="$1";;
+ --help|-h) usage; exit;;
+ --keep-going|-k) KEEP_GOING=1;;
+ --list-all-components) printf '%s\n' $ALL_COMPONENTS; exit;;
+ --list-components) printf '%s\n' $SUPPORTED_COMPONENTS; exit;;
+ --memory|-m) MEMORY=1;;
+ --no-append-outcome) append_outcome=0;;
+ --no-armcc) no_armcc=1;;
+ --no-force) FORCE=0;;
+ --no-keep-going) KEEP_GOING=0;;
+ --no-memory) MEMORY=0;;
+ --no-quiet) QUIET=0;;
+ --openssl) shift; OPENSSL="$1";;
+ --openssl-legacy) shift; OPENSSL_LEGACY="$1";;
+ --openssl-next) shift; OPENSSL_NEXT="$1";;
+ --outcome-file) shift; MBEDTLS_TEST_OUTCOME_FILE="$1";;
+ --out-of-source-dir) shift; OUT_OF_SOURCE_DIR="$1";;
+ --quiet|-q) QUIET=1;;
+ --random-seed) unset SEED;;
+ --release-test|-r) SEED=$RELEASE_SEED;;
+ --seed|-s) shift; SEED="$1";;
+ -*)
+ echo >&2 "Unknown option: $1"
+ echo >&2 "Run $0 --help for usage."
+ exit 120
+ ;;
+ *) COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS $1";;
+ esac
+ shift
+ done
+
+ # With no list of components, run everything.
+ if [ -z "$COMMAND_LINE_COMPONENTS" ]; then
+ all_except=1
fi
- if [ -d "$OUT_OF_SOURCE_DIR" ]; then
- echo "Warning - there is an existing directory at '$OUT_OF_SOURCE_DIR'" >&2
- echo "You can either delete this directory manually, or force the test by rerunning"
- echo "the script as: $0 --force --out-of-source-dir $OUT_OF_SOURCE_DIR"
- exit 1
+ # --no-armcc is a legacy option. The modern way is --except '*_armcc*'.
+ # Ignore it if components are listed explicitly on the command line.
+ if [ -n "$no_armcc" ] && [ $all_except -eq 1 ]; then
+ COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS *_armcc*"
fi
- if ! git diff-files --quiet include/mbedtls/config.h; then
- err_msg "Warning - the configuration file 'include/mbedtls/config.h' has been edited. "
- echo "You can either delete or preserve your work, or force the test by rerunning the"
- echo "script as: $0 --force"
- exit 1
- fi
-fi
+ # Build the list of components to run.
+ RUN_COMPONENTS=
+ for component in $SUPPORTED_COMPONENTS; do
+ if is_component_included "$component"; [ $? -eq $all_except ]; then
+ RUN_COMPONENTS="$RUN_COMPONENTS $component"
+ fi
+ done
-build_status=0
-if [ $KEEP_GOING -eq 1 ]; then
+ unset all_except
+ unset no_armcc
+}
+
+pre_check_git () {
+ if [ $FORCE -eq 1 ]; then
+ rm -rf "$OUT_OF_SOURCE_DIR"
+ git checkout-index -f -q $CONFIG_H
+ cleanup
+ else
+
+ if [ -d "$OUT_OF_SOURCE_DIR" ]; then
+ echo "Warning - there is an existing directory at '$OUT_OF_SOURCE_DIR'" >&2
+ echo "You can either delete this directory manually, or force the test by rerunning"
+ echo "the script as: $0 --force --out-of-source-dir $OUT_OF_SOURCE_DIR"
+ exit 1
+ fi
+
+ if ! git diff --quiet include/mbedtls/mbedtls_config.h; then
+ err_msg "Warning - the configuration file 'include/mbedtls/mbedtls_config.h' has been edited. "
+ echo "You can either delete or preserve your work, or force the test by rerunning the"
+ echo "script as: $0 --force"
+ exit 1
+ fi
+ fi
+}
+
+pre_setup_keep_going () {
failure_summary=
failure_count=0
start_red=
@@ -302,7 +494,7 @@
failure_summary="$failure_summary
$text"
failure_count=$((failure_count + 1))
- echo "${start_red}^^^^$text^^^^${end_color}"
+ echo "${start_red}^^^^$text^^^^${end_color}" >&2
fi
}
make () {
@@ -326,6 +518,7 @@
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "${start_red}FAILED: $failure_count${end_color}$failure_summary"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+ exit 1
elif [ -z "${1-}" ]; then
echo "SUCCESS :)"
fi
@@ -333,53 +526,146 @@
echo "Killed by SIG$1."
fi
}
-else
- record_status () {
- "$@"
- }
-fi
+}
+
if_build_succeeded () {
if [ $build_status -eq 0 ]; then
record_status "$@"
fi
}
-msg "info: $0 configuration"
-echo "MEMORY: $MEMORY"
-echo "FORCE: $FORCE"
-echo "SEED: ${SEED-"UNSET"}"
-echo "OPENSSL: $OPENSSL"
-echo "OPENSSL_LEGACY: $OPENSSL_LEGACY"
-echo "GNUTLS_CLI: $GNUTLS_CLI"
-echo "GNUTLS_SERV: $GNUTLS_SERV"
-echo "GNUTLS_LEGACY_CLI: $GNUTLS_LEGACY_CLI"
-echo "GNUTLS_LEGACY_SERV: $GNUTLS_LEGACY_SERV"
-echo "ARMC5_BIN_DIR: $ARMC5_BIN_DIR"
-echo "ARMC6_BIN_DIR: $ARMC6_BIN_DIR"
+# to be used instead of ! for commands run with
+# record_status or if_build_succeeded
+not() {
+ ! "$@"
+}
-ARMC5_CC="$ARMC5_BIN_DIR/armcc"
-ARMC5_AR="$ARMC5_BIN_DIR/armar"
-ARMC6_CC="$ARMC6_BIN_DIR/armclang"
-ARMC6_AR="$ARMC6_BIN_DIR/armar"
+pre_setup_quiet_redirect () {
+ if [ $QUIET -ne 1 ]; then
+ redirect_out () {
+ "$@"
+ }
+ redirect_err () {
+ "$@"
+ }
+ else
+ redirect_out () {
+ "$@" >/dev/null
+ }
+ redirect_err () {
+ "$@" 2>/dev/null
+ }
+ fi
+}
-# To avoid setting OpenSSL and GnuTLS for each call to compat.sh and ssl-opt.sh
-# we just export the variables they require
-export OPENSSL_CMD="$OPENSSL"
-export GNUTLS_CLI="$GNUTLS_CLI"
-export GNUTLS_SERV="$GNUTLS_SERV"
+pre_prepare_outcome_file () {
+ case "$MBEDTLS_TEST_OUTCOME_FILE" in
+ [!/]*) MBEDTLS_TEST_OUTCOME_FILE="$PWD/$MBEDTLS_TEST_OUTCOME_FILE";;
+ esac
+ if [ -n "$MBEDTLS_TEST_OUTCOME_FILE" ] && [ "$append_outcome" -eq 0 ]; then
+ rm -f "$MBEDTLS_TEST_OUTCOME_FILE"
+ fi
+}
-# Avoid passing --seed flag in every call to ssl-opt.sh
-if [ -n "${SEED-}" ]; then
- export SEED
-fi
+pre_print_configuration () {
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
+ msg "info: $0 configuration"
+ echo "MEMORY: $MEMORY"
+ echo "FORCE: $FORCE"
+ echo "MBEDTLS_TEST_OUTCOME_FILE: ${MBEDTLS_TEST_OUTCOME_FILE:-(none)}"
+ echo "SEED: ${SEED-"UNSET"}"
+ echo
+ echo "OPENSSL: $OPENSSL"
+ echo "OPENSSL_LEGACY: $OPENSSL_LEGACY"
+ echo "OPENSSL_NEXT: $OPENSSL_NEXT"
+ echo "GNUTLS_CLI: $GNUTLS_CLI"
+ echo "GNUTLS_SERV: $GNUTLS_SERV"
+ echo "GNUTLS_LEGACY_CLI: $GNUTLS_LEGACY_CLI"
+ echo "GNUTLS_LEGACY_SERV: $GNUTLS_LEGACY_SERV"
+ echo "ARMC5_BIN_DIR: $ARMC5_BIN_DIR"
+ echo "ARMC6_BIN_DIR: $ARMC6_BIN_DIR"
+}
# Make sure the tools we need are available.
-check_tools "$OPENSSL" "$OPENSSL_LEGACY" "$GNUTLS_CLI" "$GNUTLS_SERV" \
- "$GNUTLS_LEGACY_CLI" "$GNUTLS_LEGACY_SERV" "doxygen" "dot" \
- "arm-none-eabi-gcc" "i686-w64-mingw32-gcc" "gdb"
-if [ $RUN_ARMCC -ne 0 ]; then
- check_tools "$ARMC5_CC" "$ARMC5_AR" "$ARMC6_CC" "$ARMC6_AR"
-fi
+pre_check_tools () {
+ # Build the list of variables to pass to output_env.sh.
+ set env
+
+ case " $RUN_COMPONENTS " in
+ # Require OpenSSL and GnuTLS if running any tests (as opposed to
+ # only doing builds). Not all tests run OpenSSL and GnuTLS, but this
+ # is a good enough approximation in practice.
+ *" test_"*)
+ # To avoid setting OpenSSL and GnuTLS for each call to compat.sh
+ # and ssl-opt.sh, we just export the variables they require.
+ export OPENSSL_CMD="$OPENSSL"
+ export GNUTLS_CLI="$GNUTLS_CLI"
+ export GNUTLS_SERV="$GNUTLS_SERV"
+ # Avoid passing --seed flag in every call to ssl-opt.sh
+ if [ -n "${SEED-}" ]; then
+ export SEED
+ fi
+ set "$@" OPENSSL="$OPENSSL" OPENSSL_LEGACY="$OPENSSL_LEGACY"
+ set "$@" GNUTLS_CLI="$GNUTLS_CLI" GNUTLS_SERV="$GNUTLS_SERV"
+ set "$@" GNUTLS_LEGACY_CLI="$GNUTLS_LEGACY_CLI"
+ set "$@" GNUTLS_LEGACY_SERV="$GNUTLS_LEGACY_SERV"
+ check_tools "$OPENSSL" "$OPENSSL_LEGACY" "$OPENSSL_NEXT" \
+ "$GNUTLS_CLI" "$GNUTLS_SERV" \
+ "$GNUTLS_LEGACY_CLI" "$GNUTLS_LEGACY_SERV"
+ ;;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_doxygen[_\ ]*) check_tools "doxygen" "dot";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_arm_none_eabi_gcc[_\ ]*) check_tools "${ARM_NONE_EABI_GCC_PREFIX}gcc";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_mingw[_\ ]*) check_tools "i686-w64-mingw32-gcc";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *" test_zeroize "*) check_tools "gdb";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_armcc*)
+ ARMC5_CC="$ARMC5_BIN_DIR/armcc"
+ ARMC5_AR="$ARMC5_BIN_DIR/armar"
+ ARMC5_FROMELF="$ARMC5_BIN_DIR/fromelf"
+ ARMC6_CC="$ARMC6_BIN_DIR/armclang"
+ ARMC6_AR="$ARMC6_BIN_DIR/armar"
+ ARMC6_FROMELF="$ARMC6_BIN_DIR/fromelf"
+ check_tools "$ARMC5_CC" "$ARMC5_AR" "$ARMC5_FROMELF" \
+ "$ARMC6_CC" "$ARMC6_AR" "$ARMC6_FROMELF";;
+ esac
+
+ # past this point, no call to check_tool, only printing output
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
+ msg "info: output_env.sh"
+ case $RUN_COMPONENTS in
+ *_armcc*)
+ set "$@" ARMC5_CC="$ARMC5_CC" ARMC6_CC="$ARMC6_CC" RUN_ARMCC=1;;
+ *) set "$@" RUN_ARMCC=0;;
+ esac
+ "$@" scripts/output_env.sh
+}
+
+pre_generate_files() {
+    # since make doesn't have proper dependencies, remove any possibly outdated
+ # file that might be around before generating fresh ones
+ make neat
+ make generated_files
+}
@@ -398,28 +684,73 @@
#
# Indicative running times are given for reference.
-msg "info: output_env.sh"
-OPENSSL="$OPENSSL" OPENSSL_LEGACY="$OPENSSL_LEGACY" GNUTLS_CLI="$GNUTLS_CLI" \
- GNUTLS_SERV="$GNUTLS_SERV" GNUTLS_LEGACY_CLI="$GNUTLS_LEGACY_CLI" \
- GNUTLS_LEGACY_SERV="$GNUTLS_LEGACY_SERV" ARMC5_CC="$ARMC5_CC" \
- ARMC6_CC="$ARMC6_CC" RUN_ARMCC="$RUN_ARMCC" scripts/output_env.sh
+component_check_recursion () {
+ msg "Check: recursion.pl" # < 1s
+ record_status tests/scripts/recursion.pl library/*.c
+}
-msg "test: recursion.pl" # < 1s
-tests/scripts/recursion.pl library/*.c
+component_check_generated_files () {
+ msg "Check: check-generated-files, files generated with make" # 2s
+ make generated_files
+ record_status tests/scripts/check-generated-files.sh
-msg "test: freshness of generated source files" # < 1s
-tests/scripts/check-generated-files.sh
+ msg "Check: check-generated-files -u, files present" # 2s
+ record_status tests/scripts/check-generated-files.sh -u
+ # Check that the generated files are considered up to date.
+ record_status tests/scripts/check-generated-files.sh
-msg "test: doxygen markup outside doxygen blocks" # < 1s
-tests/scripts/check-doxy-blocks.pl
+ msg "Check: check-generated-files -u, files absent" # 2s
+ command make neat
+ record_status tests/scripts/check-generated-files.sh -u
+ # Check that the generated files are considered up to date.
+ record_status tests/scripts/check-generated-files.sh
-msg "test/build: declared and exported names" # < 3s
-cleanup
-tests/scripts/check-names.py
+ # This component ends with the generated files present in the source tree.
+ # This is necessary for subsequent components!
+}
-msg "test: doxygen warnings" # ~ 3s
-cleanup
-tests/scripts/doxygen.sh
+component_check_doxy_blocks () {
+ msg "Check: doxygen markup outside doxygen blocks" # < 1s
+ record_status tests/scripts/check-doxy-blocks.pl
+}
+
+component_check_files () {
+ msg "Check: file sanity checks (permissions, encodings)" # < 1s
+ record_status tests/scripts/check_files.py
+}
+
+component_check_changelog () {
+ msg "Check: changelog entries" # < 1s
+ rm -f ChangeLog.new
+ record_status scripts/assemble_changelog.py -o ChangeLog.new
+ if [ -e ChangeLog.new ]; then
+ # Show the diff for information. It isn't an error if the diff is
+ # non-empty.
+ diff -u ChangeLog ChangeLog.new || true
+ rm ChangeLog.new
+ fi
+}
+
+component_check_names () {
+ msg "Check: declared and exported names (builds the library)" # < 3s
+ record_status tests/scripts/check-names.py -v
+}
+
+component_check_test_cases () {
+ msg "Check: test case descriptions" # < 1s
+ if [ $QUIET -eq 1 ]; then
+ opt='--quiet'
+ else
+ opt=''
+ fi
+ record_status tests/scripts/check_test_cases.py $opt
+ unset opt
+}
+
+component_check_doxygen_warnings () {
+ msg "Check: doxygen warnings (builds the documentation)" # ~ 3s
+ record_status tests/scripts/doxygen.sh
+}
@@ -427,329 +758,1666 @@
#### Build and test many configurations and targets
################################################################
-if [ $RUN_ARMCC -ne 0 ] && [ $YOTTA -ne 0 ]; then
- # Note - use of yotta is deprecated, and yotta also requires armcc to be on the
- # path, and uses whatever version of armcc it finds there.
- msg "build: create and build yotta module" # ~ 30s
- cleanup
- record_status tests/scripts/yotta-build.sh
-fi
+component_test_default_out_of_box () {
+ msg "build: make, default config (out-of-box)" # ~1min
+ make
+ # Disable fancy stuff
+ SAVE_MBEDTLS_TEST_OUTCOME_FILE="$MBEDTLS_TEST_OUTCOME_FILE"
+ unset MBEDTLS_TEST_OUTCOME_FILE
-msg "build: cmake, gcc, ASan" # ~ 1 min 50s
-cleanup
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
-
-msg "test: main suites (inc. selftests) (ASan build)" # ~ 50s
-make test
-
-msg "test: ssl-opt.sh (ASan build)" # ~ 1 min
-if_build_succeeded tests/ssl-opt.sh
-
-msg "test/build: ref-configs (ASan build)" # ~ 6 min 20s
-record_status tests/scripts/test-ref-configs.pl
-
-msg "build: with ASan (rebuild after ref-configs)" # ~ 1 min
-make
-
-msg "test: compat.sh (ASan build)" # ~ 6 min
-if_build_succeeded tests/compat.sh
-
-msg "build: Default + SSLv3 (ASan build)" # ~ 6 min
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_SSL_PROTO_SSL3
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
-
-msg "test: SSLv3 - main suites (inc. selftests) (ASan build)" # ~ 50s
-make test
-
-msg "build: SSLv3 - compat.sh (ASan build)" # ~ 6 min
-if_build_succeeded tests/compat.sh -m 'tls1 tls1_1 tls1_2 dtls1 dtls1_2'
-if_build_succeeded env OPENSSL_CMD="$OPENSSL_LEGACY" tests/compat.sh -m 'ssl3'
-
-msg "build: SSLv3 - ssl-opt.sh (ASan build)" # ~ 6 min
-if_build_succeeded tests/ssl-opt.sh
-
-msg "build: Default + !MBEDTLS_SSL_RENEGOTIATION (ASan build)" # ~ 6 min
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl unset MBEDTLS_SSL_RENEGOTIATION
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
-
-msg "test: !MBEDTLS_SSL_RENEGOTIATION - main suites (inc. selftests) (ASan build)" # ~ 50s
-make test
-
-msg "test: !MBEDTLS_SSL_RENEGOTIATION - ssl-opt.sh (ASan build)" # ~ 6 min
-if_build_succeeded tests/ssl-opt.sh
-
-msg "build: Default + RSA_NO_CRT (ASan build)" # ~ 6 min
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_RSA_NO_CRT
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
-
-msg "test: RSA_NO_CRT - main suites (inc. selftests) (ASan build)" # ~ 50s
-make test
-
-msg "test: RSA_NO_CRT - RSA-related part of ssl-opt.sh (ASan build)" # ~ 5s
-tests/ssl-opt.sh -f RSA
-
-msg "test: RSA_NO_CRT - RSA-related part of compat.sh (ASan build)" # ~ 3 min
-tests/compat.sh -t RSA
-
-msg "build: cmake, full config, clang" # ~ 50s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_MEMORY_BACKTRACE # too slow for tests
-CC=clang cmake -D CMAKE_BUILD_TYPE:String=Check -D ENABLE_TESTING=On .
-make
-
-msg "test: main suites (full config)" # ~ 5s
-make test
-
-msg "test: ssl-opt.sh default (full config)" # ~ 1s
-if_build_succeeded tests/ssl-opt.sh -f Default
-
-msg "test: compat.sh RC4, DES & NULL (full config)" # ~ 2 min
-if_build_succeeded env OPENSSL_CMD="$OPENSSL_LEGACY" GNUTLS_CLI="$GNUTLS_LEGACY_CLI" GNUTLS_SERV="$GNUTLS_LEGACY_SERV" tests/compat.sh -e '3DES\|DES-CBC3' -f 'NULL\|DES\|RC4\|ARCFOUR'
-
-msg "test/build: curves.pl (gcc)" # ~ 4 min
-cleanup
-record_status tests/scripts/curves.pl
-
-msg "test/build: depends-hashes.pl (gcc)" # ~ 2 min
-cleanup
-record_status tests/scripts/depends-hashes.pl
-
-msg "test/build: depends-pkalgs.pl (gcc)" # ~ 2 min
-cleanup
-record_status tests/scripts/depends-pkalgs.pl
-
-msg "test/build: key-exchanges (gcc)" # ~ 1 min
-cleanup
-record_status tests/scripts/key-exchanges.pl
-
-msg "build: Unix make, -Os (gcc)" # ~ 30s
-cleanup
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -Os'
-
-# Full configuration build, without platform support, file IO and net sockets.
-# This should catch missing mbedtls_printf definitions, and by disabling file
-# IO, it should catch missing '#include <stdio.h>'
-msg "build: full config except platform/fsio/net, make, gcc, C99" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_PLATFORM_C
-scripts/config.pl unset MBEDTLS_NET_C
-scripts/config.pl unset MBEDTLS_PLATFORM_MEMORY
-scripts/config.pl unset MBEDTLS_PLATFORM_PRINTF_ALT
-scripts/config.pl unset MBEDTLS_PLATFORM_FPRINTF_ALT
-scripts/config.pl unset MBEDTLS_PLATFORM_SNPRINTF_ALT
-scripts/config.pl unset MBEDTLS_PLATFORM_TIME_ALT
-scripts/config.pl unset MBEDTLS_PLATFORM_EXIT_ALT
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-scripts/config.pl unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
-scripts/config.pl unset MBEDTLS_FS_IO
-# Note, _DEFAULT_SOURCE needs to be defined for platforms using glibc version >2.19,
-# to re-enable platform integration features otherwise disabled in C99 builds
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -std=c99 -pedantic -O0 -D_DEFAULT_SOURCE' lib programs
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -O0' test
-
-# catch compile bugs in _uninit functions
-msg "build: full config with NO_STD_FUNCTION, make, gcc" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl set MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -O0'
-
-msg "build: full config except ssl_srv.c, make, gcc" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_SSL_SRV_C
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -O0'
-
-msg "build: full config except ssl_cli.c, make, gcc" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_SSL_CLI_C
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -O0'
-
-# Note, C99 compliance can also be tested with the sockets support disabled,
-# as that requires a POSIX platform (which isn't the same as C99).
-msg "build: full config except net_sockets.c, make, gcc -std=c99 -pedantic" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_NET_C # getaddrinfo() undeclared, etc.
-scripts/config.pl set MBEDTLS_NO_PLATFORM_ENTROPY # uses syscall() on GNU/Linux
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -O0 -std=c99 -pedantic' lib
-
-msg "build: default config except MFL extension (ASan build)" # ~ 30s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl unset MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
-
-msg "test: ssl-opt.sh, MFL-related tests"
-if_build_succeeded tests/ssl-opt.sh -f "Max fragment length"
-
-msg "build: default config with MBEDTLS_TEST_NULL_ENTROPY (ASan build)"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_TEST_NULL_ENTROPY
-scripts/config.pl set MBEDTLS_NO_DEFAULT_ENTROPY_SOURCES
-scripts/config.pl set MBEDTLS_ENTROPY_C
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-scripts/config.pl unset MBEDTLS_ENTROPY_HARDWARE_ALT
-scripts/config.pl unset MBEDTLS_HAVEGE_C
-CC=gcc cmake -D UNSAFE_BUILD=ON -D CMAKE_C_FLAGS:String="-fsanitize=address -fno-common -O3" .
-make
-
-msg "test: MBEDTLS_TEST_NULL_ENTROPY - main suites (inc. selftests) (ASan build)"
-make test
-
-msg "build: default config with AES_FEWER_TABLES enabled"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_AES_FEWER_TABLES
-make CC=gcc CFLAGS='-Werror -Wall -Wextra'
-
-msg "test: AES_FEWER_TABLES"
-make test
-
-msg "build: default config with AES_ROM_TABLES enabled"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_AES_ROM_TABLES
-make CC=gcc CFLAGS='-Werror -Wall -Wextra'
-
-msg "test: AES_ROM_TABLES"
-make test
-
-msg "build: default config with AES_ROM_TABLES and AES_FEWER_TABLES enabled"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_AES_FEWER_TABLES
-scripts/config.pl set MBEDTLS_AES_ROM_TABLES
-make CC=gcc CFLAGS='-Werror -Wall -Wextra'
-
-msg "test: AES_FEWER_TABLES + AES_ROM_TABLES"
-make test
-
-if uname -a | grep -F Linux >/dev/null; then
- msg "build/test: make shared" # ~ 40s
- cleanup
- make SHARED=1 all check
-fi
-
-if uname -a | grep -F x86_64 >/dev/null; then
- msg "build: i386, make, gcc" # ~ 30s
- cleanup
- make CC=gcc CFLAGS='-Werror -Wall -Wextra -m32'
-
- msg "test: i386, make, gcc"
+ msg "test: main suites make, default config (out-of-box)" # ~10s
make test
+ msg "selftest: make, default config (out-of-box)" # ~10s
+ if_build_succeeded programs/test/selftest
+
+ export MBEDTLS_TEST_OUTCOME_FILE="$SAVE_MBEDTLS_TEST_OUTCOME_FILE"
+ unset SAVE_MBEDTLS_TEST_OUTCOME_FILE
+}
+
+component_test_default_cmake_gcc_asan () {
+ msg "build: cmake, gcc, ASan" # ~ 1 min 50s
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: selftest (ASan build)" # ~ 10s
+ if_build_succeeded programs/test/selftest
+
+ msg "test: ssl-opt.sh (ASan build)" # ~ 1 min
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh (ASan build)" # ~ 6 min
+ if_build_succeeded tests/compat.sh
+
+ msg "test: context-info.sh (ASan build)" # ~ 15 sec
+ if_build_succeeded tests/context-info.sh
+}
+
+component_test_full_cmake_gcc_asan () {
+ msg "build: full config, cmake, gcc, ASan"
+ scripts/config.py full
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: main suites (inc. selftests) (full config, ASan build)"
+ make test
+
+ msg "test: selftest (ASan build)" # ~ 10s
+ if_build_succeeded programs/test/selftest
+
+ msg "test: ssl-opt.sh (full config, ASan build)"
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh (full config, ASan build)"
+ if_build_succeeded tests/compat.sh
+
+ msg "test: context-info.sh (full config, ASan build)" # ~ 15 sec
+ if_build_succeeded tests/context-info.sh
+}
+
+component_test_psa_crypto_key_id_encodes_owner () {
+ msg "build: full config - USE_PSA_CRYPTO + PSA_CRYPTO_KEY_ID_ENCODES_OWNER, cmake, gcc, ASan"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full config - USE_PSA_CRYPTO + PSA_CRYPTO_KEY_ID_ENCODES_OWNER, cmake, gcc, ASan"
+ make test
+}
+
+# check_renamed_symbols HEADER LIB
+# Check that if HEADER contains '#define MACRO ...' then MACRO is not a symbol
+# name in LIB.
+check_renamed_symbols () {
+ ! nm "$2" | sed 's/.* //' |
+ grep -x -F "$(sed -n 's/^ *# *define *\([A-Z_a-z][0-9A-Z_a-z]*\)..*/\1/p' "$1")"
+}
+
+component_build_psa_crypto_spm () {
+ msg "build: full config - USE_PSA_CRYPTO + PSA_CRYPTO_KEY_ID_ENCODES_OWNER + PSA_CRYPTO_SPM, make, gcc"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_SPM
+ # We can only compile, not link, since our test and sample programs
+ # aren't equipped for the modified names used when MBEDTLS_PSA_CRYPTO_SPM
+ # is active.
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -I../tests/include/spe' lib
+
+ # Check that if a symbol is renamed by crypto_spe.h, the non-renamed
+ # version is not present.
+ echo "Checking for renamed symbols in the library"
+ if_build_succeeded check_renamed_symbols tests/include/spe/crypto_spe.h library/libmbedcrypto.a
+}
+
+component_test_psa_crypto_client () {
+ msg "build: default config - PSA_CRYPTO_C + PSA_CRYPTO_CLIENT, make"
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CLIENT
+ make
+
+ msg "test: default config - PSA_CRYPTO_C + PSA_CRYPTO_CLIENT, make"
+ make test
+}
+
+component_test_psa_crypto_rsa_no_genprime() {
+ msg "build: default config minus MBEDTLS_GENPRIME"
+ scripts/config.py unset MBEDTLS_GENPRIME
+ make
+
+ msg "test: default config minus MBEDTLS_GENPRIME"
+ make test
+}
+
+component_test_ref_configs () {
+ msg "test/build: ref-configs (ASan build)" # ~ 6 min 20s
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ record_status tests/scripts/test-ref-configs.pl
+}
+
+component_test_no_renegotiation () {
+ msg "build: Default + !MBEDTLS_SSL_RENEGOTIATION (ASan build)" # ~ 6 min
+ scripts/config.py unset MBEDTLS_SSL_RENEGOTIATION
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: !MBEDTLS_SSL_RENEGOTIATION - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: !MBEDTLS_SSL_RENEGOTIATION - ssl-opt.sh (ASan build)" # ~ 6 min
+ if_build_succeeded tests/ssl-opt.sh
+}
+
+component_test_no_pem_no_fs () {
+ msg "build: Default + !MBEDTLS_PEM_PARSE_C + !MBEDTLS_FS_IO (ASan build)"
+ scripts/config.py unset MBEDTLS_PEM_PARSE_C
+ scripts/config.py unset MBEDTLS_FS_IO
+ scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C # requires a filesystem
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C # requires PSA ITS
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: !MBEDTLS_PEM_PARSE_C !MBEDTLS_FS_IO - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: !MBEDTLS_PEM_PARSE_C !MBEDTLS_FS_IO - ssl-opt.sh (ASan build)" # ~ 6 min
+ if_build_succeeded tests/ssl-opt.sh
+}
+
+component_test_rsa_no_crt () {
+ msg "build: Default + RSA_NO_CRT (ASan build)" # ~ 6 min
+ scripts/config.py set MBEDTLS_RSA_NO_CRT
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: RSA_NO_CRT - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: RSA_NO_CRT - RSA-related part of ssl-opt.sh (ASan build)" # ~ 5s
+ if_build_succeeded tests/ssl-opt.sh -f RSA
+
+ msg "test: RSA_NO_CRT - RSA-related part of compat.sh (ASan build)" # ~ 3 min
+ if_build_succeeded tests/compat.sh -t RSA
+
+ msg "test: RSA_NO_CRT - RSA-related part of context-info.sh (ASan build)" # ~ 15 sec
+ if_build_succeeded tests/context-info.sh
+}
+
+component_test_no_ctr_drbg_classic () {
+ msg "build: Full minus CTR_DRBG, classic crypto in TLS"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: Full minus CTR_DRBG, classic crypto - main suites"
+ make test
+
+ # In this configuration, the TLS test programs use HMAC_DRBG.
+ # The SSL tests are slow, so run a small subset, just enough to get
+ # confidence that the SSL code copes with HMAC_DRBG.
+ msg "test: Full minus CTR_DRBG, classic crypto - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|SSL async private.*delay=\|tickets enabled on server'
+
+ msg "test: Full minus CTR_DRBG, classic crypto - compat.sh (subset)"
+ if_build_succeeded tests/compat.sh -m tls1_2 -t 'ECDSA PSK' -V NO -p OpenSSL
+}
+
+component_test_no_ctr_drbg_use_psa () {
+ msg "build: Full minus CTR_DRBG, PSA crypto in TLS"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - main suites"
+ make test
+
+ # In this configuration, the TLS test programs use HMAC_DRBG.
+ # The SSL tests are slow, so run a small subset, just enough to get
+ # confidence that the SSL code copes with HMAC_DRBG.
+ msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|SSL async private.*delay=\|tickets enabled on server'
+
+ msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - compat.sh (subset)"
+ if_build_succeeded tests/compat.sh -m tls1_2 -t 'ECDSA PSK' -V NO -p OpenSSL
+}
+
+component_test_no_hmac_drbg_classic () {
+ msg "build: Full minus HMAC_DRBG, classic crypto in TLS"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+ scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: Full minus HMAC_DRBG, classic crypto - main suites"
+ make test
+
+ # Normally our ECDSA implementation uses deterministic ECDSA. But since
+ # HMAC_DRBG is disabled in this configuration, randomized ECDSA is used
+ # instead.
+ # Test SSL with non-deterministic ECDSA. Only test features that
+ # might be affected by how ECDSA signature is performed.
+ msg "test: Full minus HMAC_DRBG, classic crypto - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|SSL async private: sign'
+
+ # To save time, only test one protocol version, since this part of
+ # the protocol is identical in (D)TLS up to 1.2.
+ msg "test: Full minus HMAC_DRBG, classic crypto - compat.sh (ECDSA)"
+ if_build_succeeded tests/compat.sh -m tls1_2 -t 'ECDSA'
+}
+
+component_test_no_hmac_drbg_use_psa () {
+ msg "build: Full minus HMAC_DRBG, PSA crypto in TLS"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+ scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - main suites"
+ make test
+
+ # Normally our ECDSA implementation uses deterministic ECDSA. But since
+ # HMAC_DRBG is disabled in this configuration, randomized ECDSA is used
+ # instead.
+ # Test SSL with non-deterministic ECDSA. Only test features that
+ # might be affected by how ECDSA signature is performed.
+ msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|SSL async private: sign'
+
+ # To save time, only test one protocol version, since this part of
+ # the protocol is identical in (D)TLS up to 1.2.
+ msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - compat.sh (ECDSA)"
+ if_build_succeeded tests/compat.sh -m tls1_2 -t 'ECDSA'
+}
+
+component_test_psa_external_rng_no_drbg_classic () {
+ msg "build: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto in TLS"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+ scripts/config.py unset MBEDTLS_ENTROPY_C
+ scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+ scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+ scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+ # When MBEDTLS_USE_PSA_CRYPTO is disabled and there is no DRBG,
+ # the SSL test programs don't have an RNG and can't work. Explicitly
+ # make them use the PSA RNG with -DMBEDTLS_TEST_USE_PSA_CRYPTO_RNG.
+ make CFLAGS="$ASAN_CFLAGS -O2 -DMBEDTLS_TEST_USE_PSA_CRYPTO_RNG" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto - main suites"
+ make test
+
+ msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default'
+}
+
+component_test_psa_external_rng_no_drbg_use_psa () {
+ msg "build: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto in TLS"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+ scripts/config.py unset MBEDTLS_ENTROPY_C
+ scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+ scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+ scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+ make CFLAGS="$ASAN_CFLAGS -O2" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto - main suites"
+ make test
+
+ msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto - ssl-opt.sh (subset)"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|opaque'
+}
+
+component_test_psa_external_rng_use_psa_crypto () {
+ msg "build: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ make CFLAGS="$ASAN_CFLAGS -O2" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ make test
+
+ msg "test: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|opaque'
+}
+
+component_test_everest () {
+ msg "build: Everest ECDH context (ASan build)" # ~ 6 min
+ scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+ CC=clang cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: Everest ECDH context - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s
+ if_build_succeeded tests/ssl-opt.sh -f ECDH
+
+ msg "test: Everest ECDH context - compat.sh with some ECDH ciphersuites (ASan build)" # ~ 3 min
+ # Exclude some symmetric ciphers that are redundant here to gain time.
+ if_build_succeeded tests/compat.sh -f ECDH -V NO -e 'ARIA\|CAMELLIA\|CHACHA\|DES'
+}
+
+component_test_everest_curve25519_only () {
+ msg "build: Everest ECDH context, only Curve25519" # ~ 6 min
+ scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+ scripts/config.py unset MBEDTLS_ECDSA_C
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_ECJPAKE_C
+ # Disable all curves
+ for c in $(sed -n 's/#define \(MBEDTLS_ECP_DP_[0-9A-Z_a-z]*_ENABLED\).*/\1/p' <"$CONFIG_H"); do
+ scripts/config.py unset "$c"
+ done
+ scripts/config.py set MBEDTLS_ECP_DP_CURVE25519_ENABLED
+
+ make CFLAGS="$ASAN_CFLAGS -O2" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: Everest ECDH context, only Curve25519" # ~ 50s
+ make test
+}
+
+component_test_small_ssl_out_content_len () {
+ msg "build: small SSL_OUT_CONTENT_LEN (ASan build)"
+ scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 16384
+ scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 4096
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: small SSL_OUT_CONTENT_LEN - ssl-opt.sh MFL and large packet tests"
+ if_build_succeeded tests/ssl-opt.sh -f "Max fragment\|Large packet"
+}
+
+component_test_small_ssl_in_content_len () {
+ msg "build: small SSL_IN_CONTENT_LEN (ASan build)"
+ scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 4096
+ scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 16384
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: small SSL_IN_CONTENT_LEN - ssl-opt.sh MFL tests"
+ if_build_succeeded tests/ssl-opt.sh -f "Max fragment"
+}
+
+component_test_small_ssl_dtls_max_buffering () {
+ msg "build: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #0"
+ scripts/config.py set MBEDTLS_SSL_DTLS_MAX_BUFFERING 1000
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #0 - ssl-opt.sh specific reordering test"
+ if_build_succeeded tests/ssl-opt.sh -f "DTLS reordering: Buffer out-of-order hs msg before reassembling next, free buffered msg"
+}
+
+component_test_small_mbedtls_ssl_dtls_max_buffering () {
+ msg "build: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #1"
+ scripts/config.py set MBEDTLS_SSL_DTLS_MAX_BUFFERING 190
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #1 - ssl-opt.sh specific reordering test"
+ if_build_succeeded tests/ssl-opt.sh -f "DTLS reordering: Buffer encrypted Finished message, drop for fragmented NewSessionTicket"
+}
+
+component_test_psa_collect_statuses () {
+ msg "build+test: psa_collect_statuses" # ~30s
+ scripts/config.py full
+ record_status tests/scripts/psa_collect_statuses.py
+ # Check that psa_crypto_init() succeeded at least once
+ record_status grep -q '^0:psa_crypto_init:' tests/statuses.log
+ rm -f tests/statuses.log
+}
+
+component_test_full_cmake_clang () {
+ msg "build: cmake, full config, clang" # ~ 50s
+ scripts/config.py full
+ CC=clang cmake -D CMAKE_BUILD_TYPE:String=Check -D ENABLE_TESTING=On .
+ make
+
+ msg "test: main suites (full config, clang)" # ~ 5s
+ make test
+
+ msg "test: psa_constant_names (full config, clang)" # ~ 1s
+ record_status tests/scripts/test_psa_constant_names.py
+
+ msg "test: ssl-opt.sh default, ECJPAKE, SSL async (full config)" # ~ 1s
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|ECJPAKE\|SSL async private'
+
+ msg "test: compat.sh DES, 3DES & NULL (full config)" # ~ 2 min
+ if_build_succeeded env OPENSSL_CMD="$OPENSSL_LEGACY" GNUTLS_CLI="$GNUTLS_LEGACY_CLI" GNUTLS_SERV="$GNUTLS_LEGACY_SERV" tests/compat.sh -e '^$' -f 'NULL\|DES'
+
+ msg "test: compat.sh ARIA + ChachaPoly"
+ if_build_succeeded env OPENSSL_CMD="$OPENSSL_NEXT" tests/compat.sh -e '^$' -f 'ARIA\|CHACHA'
+}
+
+component_test_memsan_constant_flow () {
+ # This tests both (1) accesses to undefined memory, and (2) branches or
+ # memory access depending on secret values. To distinguish between those:
+ # - unset MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN - does the failure persist?
+ # - or alternatively, change the build type to MemSanDbg, which enables
+ # origin tracking and nicer stack traces (which are useful for debugging
+ # anyway), and check if the origin was TEST_CF_SECRET() or something else.
+ msg "build: cmake MSan (clang), full config with constant flow testing"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN
+ scripts/config.py unset MBEDTLS_AESNI_C # memsan doesn't grok asm
+ CC=clang cmake -D CMAKE_BUILD_TYPE:String=MemSan .
+ make
+
+ msg "test: main suites (Msan + constant flow)"
+ make test
+}
+
+component_test_valgrind_constant_flow () {
+ # This tests both (1) everything that valgrind's memcheck usually checks
+ # (heap buffer overflows, use of uninitialized memory, use-after-free,
+ # etc.) and (2) branches or memory access depending on secret values,
+ # which will be reported as uninitialized memory. To distinguish between
+ # secret and actually uninitialized:
+ # - unset MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND - does the failure persist?
+ # - or alternatively, build with debug info and manually run the offending
+ # test suite with valgrind --track-origins=yes, then check if the origin
+ # was TEST_CF_SECRET() or something else.
+ msg "build: cmake release GCC, full config with constant flow testing"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND
+ cmake -D CMAKE_BUILD_TYPE:String=Release .
+ make
+
+ # this only shows a summary of the results (how many of each type)
+ # details are left in Testing/<date>/DynamicAnalysis.xml
+ msg "test: main suites (valgrind + constant flow)"
+ make memcheck
+}
+
+component_test_default_no_deprecated () {
+ # Test that removing the deprecated features from the default
+ # configuration leaves something consistent.
+ msg "build: make, default + MBEDTLS_DEPRECATED_REMOVED" # ~ 30s
+ scripts/config.py set MBEDTLS_DEPRECATED_REMOVED
+ make CC=gcc CFLAGS='-O -Werror -Wall -Wextra'
+
+ msg "test: make, default + MBEDTLS_DEPRECATED_REMOVED" # ~ 5s
+ make test
+}
+
+component_test_full_no_deprecated () {
+ msg "build: make, full_no_deprecated config" # ~ 30s
+ scripts/config.py full_no_deprecated
+ make CC=gcc CFLAGS='-O -Werror -Wall -Wextra'
+
+ msg "test: make, full_no_deprecated config" # ~ 5s
+ make test
+}
+
+component_test_full_no_deprecated_deprecated_warning () {
+ # Test that there is nothing deprecated in "full_no_deprecated".
+ # A deprecated feature would trigger a warning (made fatal) from
+ # MBEDTLS_DEPRECATED_WARNING.
+ msg "build: make, full_no_deprecated config, MBEDTLS_DEPRECATED_WARNING" # ~ 30s
+ scripts/config.py full_no_deprecated
+ scripts/config.py unset MBEDTLS_DEPRECATED_REMOVED
+ scripts/config.py set MBEDTLS_DEPRECATED_WARNING
+ make CC=gcc CFLAGS='-O -Werror -Wall -Wextra'
+
+ msg "test: make, full_no_deprecated config, MBEDTLS_DEPRECATED_WARNING" # ~ 5s
+ make test
+}
+
+component_test_full_deprecated_warning () {
+ # Test that when MBEDTLS_DEPRECATED_WARNING is enabled, the build passes
+ # with only certain whitelisted types of warnings.
+ msg "build: make, full config + MBEDTLS_DEPRECATED_WARNING, expect warnings" # ~ 30s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_DEPRECATED_WARNING
+ # Expect warnings from '#warning' directives in check_config.h.
+ make CC=gcc CFLAGS='-O -Werror -Wall -Wextra -Wno-error=cpp' lib programs
+
+ msg "build: make tests, full config + MBEDTLS_DEPRECATED_WARNING, expect warnings" # ~ 30s
+ # Set MBEDTLS_TEST_DEPRECATED to enable tests for deprecated features.
+ # By default those are disabled when MBEDTLS_DEPRECATED_WARNING is set.
+ # Expect warnings from '#warning' directives in check_config.h and
+ # from the use of deprecated functions in test suites.
+ make CC=gcc CFLAGS='-O -Werror -Wall -Wextra -Wno-error=deprecated-declarations -Wno-error=cpp -DMBEDTLS_TEST_DEPRECATED' tests
+
+ msg "test: full config + MBEDTLS_TEST_DEPRECATED" # ~ 30s
+ make test
+}
+
+# Check that the specified libraries exist and are empty.
+are_empty_libraries () {
+ nm "$@" >/dev/null 2>/dev/null
+ ! nm "$@" 2>/dev/null | grep -v ':$' | grep .
+}
+
+component_build_crypto_default () {
+ msg "build: make, crypto only"
+ scripts/config.py crypto
+ make CFLAGS='-O1 -Werror'
+ if_build_succeeded are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+component_build_crypto_full () {
+ msg "build: make, crypto only, full config"
+ scripts/config.py crypto_full
+ make CFLAGS='-O1 -Werror'
+ if_build_succeeded are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+component_build_crypto_baremetal () {
+ msg "build: make, crypto only, baremetal config"
+ scripts/config.py crypto_baremetal
+ make CFLAGS='-O1 -Werror'
+ if_build_succeeded are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+component_test_depends_curves () {
+ msg "test/build: curves.pl (gcc)" # ~ 4 min
+ record_status tests/scripts/curves.pl
+}
+
+component_test_depends_curves_psa () {
+ msg "test/build: curves.pl with MBEDTLS_USE_PSA_CRYPTO defined (gcc)"
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+ record_status tests/scripts/curves.pl
+}
+
+component_test_depends_hashes () {
+ msg "test/build: depends-hashes.pl (gcc)" # ~ 2 min
+ record_status tests/scripts/depends-hashes.pl
+}
+
+component_test_depends_hashes_psa () {
+ msg "test/build: depends-hashes.pl with MBEDTLS_USE_PSA_CRYPTO defined (gcc)"
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+ record_status tests/scripts/depends-hashes.pl
+}
+
+component_test_depends_pkalgs () {
+ msg "test/build: depends-pkalgs.pl (gcc)" # ~ 2 min
+ record_status tests/scripts/depends-pkalgs.pl
+}
+
+component_test_depends_pkalgs_psa () {
+ msg "test/build: depends-pkalgs.pl with MBEDTLS_USE_PSA_CRYPTO defined (gcc)"
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+ record_status tests/scripts/depends-pkalgs.pl
+}
+
+component_build_key_exchanges () {
+ msg "test/build: key-exchanges (gcc)" # ~ 1 min
+ record_status tests/scripts/key-exchanges.pl
+}
+
+component_build_default_make_gcc_and_cxx () {
+ msg "build: Unix make, -Os (gcc)" # ~ 30s
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -Os'
+
+ msg "test: verify header list in cpp_dummy_build.cpp"
+ record_status check_headers_in_cpp
+
+ msg "build: Unix make, incremental g++"
+ make TEST_CPP=1
+}
+
+# Compile-only build ("lib" target) with every whole-module
+# MBEDTLS_XXX_ALT option enabled, using the dummy alternative-
+# implementation headers under tests/include/alt-dummy.
+component_build_module_alt () {
+ msg "build: MBEDTLS_XXX_ALT" # ~30s
+ scripts/config.py full
+ # Disable options that are incompatible with some ALT implementations.
+ # aesni.c and padlock.c reference mbedtls_aes_context fields directly.
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ # You can only have one threading implementation: alt or pthread, not both.
+ scripts/config.py unset MBEDTLS_THREADING_PTHREAD
+ # The SpecifiedECDomain parsing code accesses mbedtls_ecp_group fields
+ # directly and assumes the implementation works with partial groups.
+ scripts/config.py unset MBEDTLS_PK_PARSE_EC_EXTENDED
+ # Enable all MBEDTLS_XXX_ALT for whole modules. Do not enable
+ # MBEDTLS_XXX_YYY_ALT which are for single functions.
+ scripts/config.py set-all 'MBEDTLS_([A-Z0-9]*|NIST_KW)_ALT'
+ scripts/config.py unset MBEDTLS_DHM_ALT #incompatible with MBEDTLS_DEBUG_C
+ # We can only compile, not link, since we don't have any implementations
+ # suitable for testing with the dummy alt headers.
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -I../tests/include/alt-dummy' lib
+}
+
+# Compile-only check of MBEDTLS_DHM_ALT on its own; it is excluded from
+# component_build_module_alt because it conflicts with MBEDTLS_DEBUG_C.
+component_build_dhm_alt () {
+ msg "build: MBEDTLS_DHM_ALT" # ~30s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_DHM_ALT
+ # debug.c currently references mbedtls_dhm_context fields directly.
+ scripts/config.py unset MBEDTLS_DEBUG_C
+ # We can only compile, not link, since we don't have any implementations
+ # suitable for testing with the dummy alt headers.
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -I../tests/include/alt-dummy' lib
+}
+
+# ASan build of the full config minus PSA crypto, then the main suites
+# plus ssl-opt.sh and several compat.sh cipher selections.
+component_test_no_use_psa_crypto_full_cmake_asan() {
+ # full minus MBEDTLS_USE_PSA_CRYPTO: run the same set of tests as basic-build-test.sh
+ msg "build: cmake, full config minus MBEDTLS_USE_PSA_CRYPTO, ASan"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_ECP_RESTARTABLE # not using PSA, so enable restartable ECC
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: main suites (full minus MBEDTLS_USE_PSA_CRYPTO)"
+ make test
+
+ msg "test: ssl-opt.sh (full minus MBEDTLS_USE_PSA_CRYPTO)"
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh default (full minus MBEDTLS_USE_PSA_CRYPTO)"
+ if_build_succeeded tests/compat.sh
+
+ # Legacy peers are needed for DES/NULL cipher suites.
+ msg "test: compat.sh DES & NULL (full minus MBEDTLS_USE_PSA_CRYPTO)"
+ if_build_succeeded env OPENSSL_CMD="$OPENSSL_LEGACY" GNUTLS_CLI="$GNUTLS_LEGACY_CLI" GNUTLS_SERV="$GNUTLS_LEGACY_SERV" tests/compat.sh -e '3DES\|DES-CBC3' -f 'NULL\|DES'
+
+ # A newer OpenSSL is needed for ARIA/ChaChaPoly cipher suites.
+ msg "test: compat.sh ARIA + ChachaPoly (full minus MBEDTLS_USE_PSA_CRYPTO)"
+ if_build_succeeded env OPENSSL_CMD="$OPENSSL_NEXT" tests/compat.sh -e '^$' -f 'ARIA\|CHACHA'
+}
+
+# Full config with MBEDTLS_PSA_CRYPTO_CONFIG and as many operations as
+# possible routed through the (fake) transparent test driver; builds
+# with ASan and runs the main test suites.
+component_test_psa_crypto_config_basic() {
+ # Test the library excluding all Mbed TLS cryptographic support for which
+ # we have an accelerator support. Acceleration is faked with the
+ # transparent test driver.
+ msg "test: full + MBEDTLS_PSA_CRYPTO_CONFIG + as much acceleration as supported"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+
+ # There is no intended accelerator support for ALG STREAM_CIPHER and
+ # ALG_ECB_NO_PADDING. Therefore, asking for them in the build implies the
+ # inclusion of the Mbed TLS cipher operations. As we want to test here with
+ # cipher operations solely supported by accelerators, disabled those
+ # PSA configuration options.
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_STREAM_CIPHER
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_ECB_NO_PADDING
+
+ # Don't test DES encryption as:
+ # 1) It is not an issue if we don't test all cipher types here.
+ # 2) That way we don't have to modify in psa_crypto.c the compilation
+ # guards MBEDTLS_PSA_BUILTIN_KEY_TYPE_DES for the code they guard to be
+ # available to the test driver. Modifications that we would need to
+ # revert when we move to compile the test driver separately.
+ # We also disable MBEDTLS_DES_C as the dependencies on DES in PSA test
+ # suites are still based on MBEDTLS_DES_C and not PSA_WANT_KEY_TYPE_DES.
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_KEY_TYPE_DES
+ scripts/config.py unset MBEDTLS_DES_C
+
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # One MBEDTLS_PSA_ACCEL_xxx define per mechanism that the test driver
+ # is declared to accelerate; accumulated into loc_cflags.
+ loc_cflags="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_AES"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_CAMELLIA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_ECC_KEY_PAIR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_KEY_PAIR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CBC_NO_PADDING"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CBC_PKCS7"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CTR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CFB"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_ECDSA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_DETERMINISTIC_ECDSA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_MD5"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_OFB"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RIPEMD160"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_SIGN"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RSA_PSS"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_1"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_224"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_256"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_384"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_512"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_XTS"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CMAC"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_HMAC"
+ loc_cflags="${loc_cflags} -I../tests/include -O2"
+
+ make CC=gcc CFLAGS="$loc_cflags" LDFLAGS="$ASAN_CFLAGS"
+ unset loc_cflags
+
+ msg "test: full + MBEDTLS_PSA_CRYPTO_CONFIG"
+ make test
+}
+
+# Full config with MBEDTLS_PSA_CRYPTO_CONFIG but no driver support:
+# ASan build plus the main test suites, no faked acceleration.
+component_test_psa_crypto_config_no_driver() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG minus MBEDTLS_PSA_CRYPTO_DRIVERS"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -O2" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full + MBEDTLS_PSA_CRYPTO_CONFIG minus MBEDTLS_PSA_CRYPTO_DRIVERS"
+ make test
+}
+
+# This should be renamed to test and updated once the accelerator ECDSA code is in place and ready to test.
+component_build_psa_accel_alg_ecdsa() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_ECDSA
+ # without MBEDTLS_ECDSA_C
+ # PSA_WANT_ALG_ECDSA and PSA_WANT_ALG_DETERMINISTIC_ECDSA are already
+ # set in include/psa/crypto_config.h
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_ECDSA without MBEDTLS_ECDSA_C"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_ECDSA_C
+ # The ECDSA-based TLS key exchanges cannot work without MBEDTLS_ECDSA_C.
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_ECDSA -DMBEDTLS_PSA_ACCEL_ALG_DETERMINISTIC_ECDSA -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator ECDH code is in place and ready to test.
+component_build_psa_accel_alg_ecdh() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_ECDH
+ # without MBEDTLS_ECDH_C
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_ECDH without MBEDTLS_ECDH_C"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_ECDH_C
+ # All (EC)DH(E)-based TLS key exchanges need MBEDTLS_ECDH_C.
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_ECDH -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator ECC key pair code is in place and ready to test.
+component_build_psa_accel_key_type_ecc_key_pair() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_KEY_TYPE_ECC_KEY_PAIR
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_KEY_TYPE_ECC_KEY_PAIR"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_KEY_TYPE_ECC_KEY_PAIR 1
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY 1
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_ECC_KEY_PAIR -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator ECC public key code is in place and ready to test.
+component_build_psa_accel_key_type_ecc_public_key() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Public key only: the key-pair type is explicitly disabled here.
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_KEY_TYPE_ECC_PUBLIC_KEY 1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_KEY_TYPE_ECC_KEY_PAIR
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_ECC_PUBLIC_KEY -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator HMAC code is in place and ready to test.
+component_build_psa_accel_alg_hmac() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_HMAC
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_HMAC"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_HMAC -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator HKDF code is in place and ready to test.
+component_build_psa_accel_alg_hkdf() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_HKDF
+ # without MBEDTLS_HKDF_C
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_HKDF without MBEDTLS_HKDF_C"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_HKDF_C
+ # Make sure to unset TLS1_3_EXPERIMENTAL since it requires HKDF_C and will not build properly without it.
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_HKDF -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator MD5 code is in place and ready to test.
+component_build_psa_accel_alg_md5() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_MD5 without other hashes
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_MD5 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_MD5 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RIPEMD160 code is in place and ready to test.
+component_build_psa_accel_alg_ripemd160() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_RIPEMD160 without other hashes
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_RIPEMD160 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RIPEMD160 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA1 code is in place and ready to test.
+component_build_psa_accel_alg_sha1() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_SHA_1 without other hashes
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_SHA_1 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_1 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA224 code is in place and ready to test.
+component_build_psa_accel_alg_sha224() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_SHA_224 without other hashes
+ # NOTE(review): unlike its siblings, this keeps PSA_WANT_ALG_SHA_256 set —
+ # presumably because SHA-224 is provided by the SHA-256 module; confirm.
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_SHA_224 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_224 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA256 code is in place and ready to test.
+component_build_psa_accel_alg_sha256() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_SHA_256 without other hashes
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_SHA_256 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_256 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA384 code is in place and ready to test.
+component_build_psa_accel_alg_sha384() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_SHA_384 without other hashes
+ # NOTE(review): PSA_WANT_ALG_SHA_512 stays set — presumably because
+ # SHA-384 is provided by the SHA-512 module; confirm.
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_SHA_384 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_256
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_384 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA512 code is in place and ready to test.
+component_build_psa_accel_alg_sha512() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_SHA_512 without other hashes
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_SHA_512 - other hashes"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_MD5
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_SHA_384
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_512 -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_alg_rsa_pkcs1v15_crypt() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_RSA_PKCS1V15_CRYPT + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Keep only PKCS#1 v1.5 encryption; drop the other RSA algorithms.
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_PKCS1V15_CRYPT 1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_OAEP
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PSS
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_CRYPT -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_alg_rsa_pkcs1v15_sign() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_RSA_PKCS1V15_SIGN and PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_RSA_PKCS1V15_SIGN + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Keep only PKCS#1 v1.5 signatures; drop the other RSA algorithms.
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_PKCS1V15_SIGN 1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_OAEP
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PSS
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_SIGN -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_alg_rsa_oaep() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_RSA_OAEP and PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_RSA_OAEP + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Keep only OAEP; drop the other RSA algorithms.
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_OAEP 1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PSS
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_OAEP -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_alg_rsa_pss() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_ALG_RSA_PSS and PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_ALG_RSA_PSS + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ # Keep only PSS; drop the other RSA algorithms.
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_PSS 1
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
+ scripts/config.py -f include/psa/crypto_config.h unset PSA_WANT_ALG_RSA_OAEP
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PSS -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_key_type_rsa_key_pair() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_KEY_TYPE_RSA_KEY_PAIR and PSA_WANT_ALG_RSA_PSS
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_KEY_TYPE_RSA_KEY_PAIR + PSA_WANT_ALG_RSA_PSS"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_PSS 1
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_KEY_TYPE_RSA_KEY_PAIR 1
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_KEY_PAIR -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
+component_build_psa_accel_key_type_rsa_public_key() {
+ # full plus MBEDTLS_PSA_CRYPTO_CONFIG with PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY and PSA_WANT_ALG_RSA_PSS
+ msg "build: full + MBEDTLS_PSA_CRYPTO_CONFIG + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY + PSA_WANT_ALG_RSA_PSS"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_ALG_RSA_PSS 1
+ scripts/config.py -f include/psa/crypto_config.h set PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY 1
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ # Build only; the test suites are not run in this component.
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_PUBLIC_KEY -I../tests/include -O2" LDFLAGS="$ASAN_CFLAGS"
+}
+
+component_test_no_platform () {
+ # Full configuration build, without platform support, file IO and net sockets.
+ # This should catch missing mbedtls_printf definitions, and by disabling file
+ # IO, it should catch missing '#include <stdio.h>'
+ msg "build: full config except platform/fsio/net, make, gcc, C99" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_PLATFORM_C
+ scripts/config.py unset MBEDTLS_NET_C
+ scripts/config.py unset MBEDTLS_PLATFORM_MEMORY
+ scripts/config.py unset MBEDTLS_PLATFORM_PRINTF_ALT
+ scripts/config.py unset MBEDTLS_PLATFORM_FPRINTF_ALT
+ scripts/config.py unset MBEDTLS_PLATFORM_SNPRINTF_ALT
+ scripts/config.py unset MBEDTLS_PLATFORM_TIME_ALT
+ scripts/config.py unset MBEDTLS_PLATFORM_EXIT_ALT
+ scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+ scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+ scripts/config.py unset MBEDTLS_FS_IO
+ # PSA storage needs file IO, which is disabled above.
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+ scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C
+ # Note, _DEFAULT_SOURCE needs to be defined for platforms using glibc version >2.19,
+ # to re-enable platform integration features otherwise disabled in C99 builds
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -std=c99 -pedantic -Os -D_DEFAULT_SOURCE' lib programs
+ # The test target is built without -std=c99 -pedantic — presumably because
+ # the test code relies on non-C99 features; confirm.
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -Os' test
+}
+
+# Build the full config with MBEDTLS_PLATFORM_NO_STD_FUNCTIONS to catch
+# compile errors in the platform _uninit/_set fallback paths.
+component_build_no_std_function () {
+ # catch compile bugs in _uninit functions
+ msg "build: full config with NO_STD_FUNCTION, make, gcc" # ~ 30s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
+ scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+ scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -Os'
+}
+
+# Build the full config with the TLS server side disabled.
+component_build_no_ssl_srv () {
+ msg "build: full config except ssl_srv.c, make, gcc" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_SSL_SRV_C
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1'
+}
+
+# Build the full config with the TLS client side disabled.
+component_build_no_ssl_cli () {
+ msg "build: full config except ssl_cli.c, make, gcc" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_SSL_CLI_C
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1'
+}
+
+# Compile-only ("lib") C99-pedantic build with networking disabled.
+component_build_no_sockets () {
+ # Note, C99 compliance can also be tested with the sockets support disabled,
+ # as that requires a POSIX platform (which isn't the same as C99).
+ msg "build: full config except net_sockets.c, make, gcc -std=c99 -pedantic" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_NET_C # getaddrinfo() undeclared, etc.
+ scripts/config.py set MBEDTLS_NO_PLATFORM_ENTROPY # uses syscall() on GNU/Linux
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1 -std=c99 -pedantic' lib
+}
+
+# Default config plus the buffer allocator with backtrace/debug options;
+# cmake build and main test suites.
+component_test_memory_buffer_allocator_backtrace () {
+ msg "build: default config with memory buffer allocator and backtrace enabled"
+ scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_PLATFORM_MEMORY
+ scripts/config.py set MBEDTLS_MEMORY_BACKTRACE
+ scripts/config.py set MBEDTLS_MEMORY_DEBUG
+ CC=gcc cmake .
+ make
+
+ msg "test: MBEDTLS_MEMORY_BUFFER_ALLOC_C and MBEDTLS_MEMORY_BACKTRACE"
+ make test
+}
+
+# Default config plus the buffer allocator; main suites and ssl-opt.sh
+# (minus the slow DTLS proxy tests).
+component_test_memory_buffer_allocator () {
+ msg "build: default config with memory buffer allocator"
+ scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_PLATFORM_MEMORY
+ CC=gcc cmake .
+ make
+
+ msg "test: MBEDTLS_MEMORY_BUFFER_ALLOC_C"
+ make test
+
+ msg "test: ssl-opt.sh, MBEDTLS_MEMORY_BUFFER_ALLOC_C"
+ # MBEDTLS_MEMORY_BUFFER_ALLOC is slow. Skip tests that tend to time out.
+ if_build_succeeded tests/ssl-opt.sh -e '^DTLS proxy'
+}
+
+# ASan build with the MFL extension disabled; run only the MFL-related
+# ssl-opt.sh tests to check the disabled-extension behavior.
+component_test_no_max_fragment_length () {
+ # Run max fragment length tests with MFL disabled
+ msg "build: default config except MFL extension (ASan build)" # ~ 30s
+ scripts/config.py unset MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: ssl-opt.sh, MFL-related tests"
+ if_build_succeeded tests/ssl-opt.sh -f "Max fragment length"
+}
+
+# ASan build with MBEDTLS_SSL_KEEP_PEER_CERTIFICATE disabled; runs the
+# main suites plus ssl-opt.sh, compat.sh and context-info.sh.
+component_test_asan_remove_peer_certificate () {
+ msg "build: default config with MBEDTLS_SSL_KEEP_PEER_CERTIFICATE disabled (ASan build)"
+ scripts/config.py unset MBEDTLS_SSL_KEEP_PEER_CERTIFICATE
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
+ make test
+
+ msg "test: ssl-opt.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
+ if_build_succeeded tests/compat.sh
+
+ msg "test: context-info.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
+ if_build_succeeded tests/context-info.sh
+}
+
+# ASan build with MFL disabled and asymmetric I/O buffers (16 KiB in,
+# 4 KiB out); runs the MFL/large-buffer ssl-opt.sh tests.
+component_test_no_max_fragment_length_small_ssl_out_content_len () {
+ msg "build: no MFL extension, small SSL_OUT_CONTENT_LEN (ASan build)"
+ scripts/config.py unset MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
+ scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 16384
+ scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 4096
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: MFL tests (disabled MFL extension case) & large packet tests"
+ if_build_succeeded tests/ssl-opt.sh -f "Max fragment length\|Large buffer"
+
+ msg "test: context-info.sh (disabled MFL extension case)"
+ if_build_succeeded tests/context-info.sh
+}
+
+component_test_variable_ssl_in_out_buffer_len () {
+ msg "build: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled (ASan build)"
+ scripts/config.py set MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
+ make test
+
+ msg "test: ssl-opt.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
+ if_build_succeeded tests/compat.sh
+}
+
+component_test_variable_ssl_in_out_buffer_len_CID () {
+ msg "build: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH and MBEDTLS_SSL_DTLS_CONNECTION_ID enabled (ASan build)"
+ scripts/config.py set MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH
+ scripts/config.py set MBEDTLS_SSL_DTLS_CONNECTION_ID
+
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH and MBEDTLS_SSL_DTLS_CONNECTION_ID"
+ make test
+
+ msg "test: ssl-opt.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH and MBEDTLS_SSL_DTLS_CONNECTION_ID enabled"
+ if_build_succeeded tests/ssl-opt.sh
+
+ msg "test: compat.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH and MBEDTLS_SSL_DTLS_CONNECTION_ID enabled"
+ if_build_succeeded tests/compat.sh
+}
+
+component_test_ssl_alloc_buffer_and_mfl () {
+ msg "build: default config with memory buffer allocator and MFL extension"
+ scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_PLATFORM_MEMORY
+ scripts/config.py set MBEDTLS_MEMORY_DEBUG
+ scripts/config.py set MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
+ scripts/config.py set MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH
+ CC=gcc cmake .
+ make
+
+ msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH, MBEDTLS_MEMORY_BUFFER_ALLOC_C, MBEDTLS_MEMORY_DEBUG and MBEDTLS_SSL_MAX_FRAGMENT_LENGTH"
+ make test
+
+ msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH, MBEDTLS_MEMORY_BUFFER_ALLOC_C, MBEDTLS_MEMORY_DEBUG and MBEDTLS_SSL_MAX_FRAGMENT_LENGTH"
+ if_build_succeeded tests/ssl-opt.sh -f "Handshake memory usage"
+}
+
+component_test_when_no_ciphersuites_have_mac () {
+ msg "build: when no ciphersuites have MAC"
+ scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+ scripts/config.py unset MBEDTLS_CMAC_C
+ make
+
+ msg "test: !MBEDTLS_SSL_SOME_MODES_USE_MAC"
+ make test
+
+ msg "test ssl-opt.sh: !MBEDTLS_SSL_SOME_MODES_USE_MAC"
+ if_build_succeeded tests/ssl-opt.sh -f 'Default\|EtM' -e 'without EtM'
+}
+
+component_test_no_date_time () {
+ msg "build: default config without MBEDTLS_HAVE_TIME_DATE"
+ scripts/config.py unset MBEDTLS_HAVE_TIME_DATE
+ CC=gcc cmake .
+ make
+
+ msg "test: !MBEDTLS_HAVE_TIME_DATE - main suites"
+ make test
+}
+
+component_test_platform_calloc_macro () {
+ msg "build: MBEDTLS_PLATFORM_{CALLOC/FREE}_MACRO enabled (ASan build)"
+ scripts/config.py set MBEDTLS_PLATFORM_MEMORY
+ scripts/config.py set MBEDTLS_PLATFORM_CALLOC_MACRO calloc
+ scripts/config.py set MBEDTLS_PLATFORM_FREE_MACRO free
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: MBEDTLS_PLATFORM_{CALLOC/FREE}_MACRO enabled (ASan build)"
+ make test
+}
+
+component_test_malloc_0_null () {
+ msg "build: malloc(0) returns NULL (ASan+UBSan build)"
+ scripts/config.py full
+ make CC=gcc CFLAGS="'-DMBEDTLS_CONFIG_FILE=\"$PWD/tests/configs/config-wrapper-malloc-0-null.h\"' $ASAN_CFLAGS -O" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: malloc(0) returns NULL (ASan+UBSan build)"
+ make test
+
+ msg "selftest: malloc(0) returns NULL (ASan+UBSan build)"
+ # Just the calloc selftest. "make test" ran the others as part of the
+ # test suites.
+ if_build_succeeded programs/test/selftest calloc
+
+ msg "test ssl-opt.sh: malloc(0) returns NULL (ASan+UBSan build)"
+ # Run a subset of the tests. The choice is a balance between coverage
+ # and time (including time indirectly wasted due to flaky tests).
+ # The current choice is to skip tests whose description includes
+ # "proxy", which is an approximation of skipping tests that use the
+ # UDP proxy, which tend to be slower and flakier.
+ if_build_succeeded tests/ssl-opt.sh -e 'proxy'
+}
+
+component_test_aes_fewer_tables () {
+ msg "build: default config with AES_FEWER_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_FEWER_TABLES
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra'
+
+ msg "test: AES_FEWER_TABLES"
+ make test
+}
+
+component_test_aes_rom_tables () {
+ msg "build: default config with AES_ROM_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_ROM_TABLES
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra'
+
+ msg "test: AES_ROM_TABLES"
+ make test
+}
+
+component_test_aes_fewer_tables_and_rom_tables () {
+ msg "build: default config with AES_ROM_TABLES and AES_FEWER_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_FEWER_TABLES
+ scripts/config.py set MBEDTLS_AES_ROM_TABLES
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra'
+
+ msg "test: AES_FEWER_TABLES + AES_ROM_TABLES"
+ make test
+}
+
+component_test_ctr_drbg_aes_256_sha_256 () {
+ msg "build: full + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_ENTROPY_FORCE_SHA256
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ make test
+}
+
+component_test_ctr_drbg_aes_128_sha_512 () {
+ msg "build: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_CTR_DRBG_USE_128_BIT_KEY
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY (ASan build)"
+ make test
+}
+
+component_test_ctr_drbg_aes_128_sha_256 () {
+ msg "build: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_CTR_DRBG_USE_128_BIT_KEY
+ scripts/config.py set MBEDTLS_ENTROPY_FORCE_SHA256
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ make test
+}
+
+component_test_se_default () {
+ msg "build: default config + MBEDTLS_PSA_CRYPTO_SE_C"
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_SE_C
+ make CC=clang CFLAGS="$ASAN_CFLAGS -Os" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: default config + MBEDTLS_PSA_CRYPTO_SE_C"
+ make test
+}
+
+component_test_psa_crypto_drivers () {
+ msg "build: MBEDTLS_PSA_CRYPTO_DRIVERS w/ driver hooks"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_DRIVERS
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ loc_cflags="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_AES"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_CAMELLIA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_ECC_KEY_PAIR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_KEY_PAIR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CBC_NO_PADDING"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CBC_PKCS7"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CTR"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CFB"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_ECDSA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_DETERMINISTIC_ECDSA"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_MD5"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_OFB"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RIPEMD160"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_SIGN"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_RSA_PSS"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_1"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_224"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_256"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_384"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_SHA_512"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_XTS"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_CMAC"
+ loc_cflags="${loc_cflags} -DMBEDTLS_PSA_ACCEL_ALG_HMAC"
+ loc_cflags="${loc_cflags} -I../tests/include -O2"
+
+ make CC=gcc CFLAGS="${loc_cflags}" LDFLAGS="$ASAN_CFLAGS"
+ unset loc_cflags
+
+ msg "test: full + MBEDTLS_PSA_CRYPTO_DRIVERS"
+ make test
+}
+
+component_test_make_shared () {
+ msg "build/test: make shared" # ~ 40s
+ make SHARED=1 all check
+ ldd programs/util/strerror | grep libmbedcrypto
+}
+
+component_test_cmake_shared () {
+ msg "build/test: cmake shared" # ~ 2min
+ cmake -DUSE_SHARED_MBEDTLS_LIBRARY=On .
+ make
+ ldd programs/util/strerror | grep libmbedcrypto
+ make test
+}
+
+test_build_opt () {
+ info=$1 cc=$2; shift 2
+ for opt in "$@"; do
+ msg "build/test: $cc $opt, $info" # ~ 30s
+ make CC="$cc" CFLAGS="$opt -std=c99 -pedantic -Wall -Wextra -Werror"
+ # We're confident enough in compilers to not run _all_ the tests,
+ # but at least run the unit tests. In particular, runs with
+ # optimizations use inline assembly whereas runs with -O0
+ # skip inline assembly.
+ make test # ~30s
+ make clean
+ done
+}
+
+component_test_clang_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' clang -O0 -Os -O2
+}
+
+component_test_gcc_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' gcc -O0 -Os -O2
+}
+
+component_build_mbedtls_config_file () {
+ msg "build: make with MBEDTLS_CONFIG_FILE" # ~40s
+ # Use the full config so as to catch a maximum of places where
+ # the check of MBEDTLS_CONFIG_FILE might be missing.
+ scripts/config.py full
+ sed 's!"check_config.h"!"mbedtls/check_config.h"!' <"$CONFIG_H" >full_config.h
+ echo '#error "MBEDTLS_CONFIG_FILE is not working"' >"$CONFIG_H"
+ make CFLAGS="-I '$PWD' -DMBEDTLS_CONFIG_FILE='\"full_config.h\"'"
+ rm -f full_config.h
+}
+
+component_test_m32_o0 () {
+ # Build once with -O0, to compile out the i386 specific inline assembly
+ msg "build: i386, make, gcc -O0 (ASan build)" # ~ 30s
+ scripts/config.py full
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32 -O0" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, make, gcc -O0 (ASan build)"
+ make test
+}
+support_test_m32_o0 () {
+ case $(uname -m) in
+ *64*) true;;
+ *) false;;
+ esac
+}
+
+component_test_m32_o1 () {
+ # Build again with -O1, to compile in the i386 specific inline assembly
+ msg "build: i386, make, gcc -O1 (ASan build)" # ~ 30s
+ scripts/config.py full
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32 -O1" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, make, gcc -O1 (ASan build)"
+ make test
+
+ msg "test ssl-opt.sh, i386, make, gcc-O1"
+ if_build_succeeded tests/ssl-opt.sh
+}
+support_test_m32_o1 () {
+ support_test_m32_o0 "$@"
+}
+
+component_test_m32_everest () {
+ msg "build: i386, Everest ECDH context (ASan build)" # ~ 6 min
+ scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32 -O2" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, Everest ECDH context - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: i386, Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s
+ if_build_succeeded tests/ssl-opt.sh -f ECDH
+
+ msg "test: i386, Everest ECDH context - compat.sh with some ECDH ciphersuites (ASan build)" # ~ 3 min
+ # Exclude some symmetric ciphers that are redundant here to gain time.
+ if_build_succeeded tests/compat.sh -f ECDH -V NO -e 'ARIA\|CAMELLIA\|CHACHA\|DES'
+}
+support_test_m32_everest () {
+ support_test_m32_o0 "$@"
+}
+
+component_test_mx32 () {
msg "build: 64-bit ILP32, make, gcc" # ~ 30s
- cleanup
- make CC=gcc CFLAGS='-Werror -Wall -Wextra -mx32'
+ scripts/config.py full
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -mx32' LDFLAGS='-mx32'
msg "test: 64-bit ILP32, make, gcc"
make test
-fi # x86_64
+}
+support_test_mx32 () {
+ case $(uname -m) in
+ amd64|x86_64) true;;
+ *) false;;
+ esac
+}
-msg "build: gcc, force 32-bit bignum limbs"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl unset MBEDTLS_HAVE_ASM
-scripts/config.pl unset MBEDTLS_AESNI_C
-scripts/config.pl unset MBEDTLS_PADLOCK_C
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -DMBEDTLS_HAVE_INT32'
+component_test_min_mpi_window_size () {
+ msg "build: Default + MBEDTLS_MPI_WINDOW_SIZE=1 (ASan build)" # ~ 10s
+ scripts/config.py set MBEDTLS_MPI_WINDOW_SIZE 1
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
-msg "test: gcc, force 32-bit bignum limbs"
-make test
+ msg "test: MBEDTLS_MPI_WINDOW_SIZE=1 - main suites (inc. selftests) (ASan build)" # ~ 10s
+ make test
+}
-msg "build: gcc, force 64-bit bignum limbs"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl unset MBEDTLS_HAVE_ASM
-scripts/config.pl unset MBEDTLS_AESNI_C
-scripts/config.pl unset MBEDTLS_PADLOCK_C
-make CC=gcc CFLAGS='-Werror -Wall -Wextra -DMBEDTLS_HAVE_INT64'
+component_test_have_int32 () {
+ msg "build: gcc, force 32-bit bignum limbs"
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -DMBEDTLS_HAVE_INT32'
-msg "test: gcc, force 64-bit bignum limbs"
-make test
+ msg "test: gcc, force 32-bit bignum limbs"
+ make test
+}
-msg "build: arm-none-eabi-gcc, make" # ~ 10s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_NET_C
-scripts/config.pl unset MBEDTLS_TIMING_C
-scripts/config.pl unset MBEDTLS_FS_IO
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-scripts/config.pl set MBEDTLS_NO_PLATFORM_ENTROPY
-# following things are not in the default config
-scripts/config.pl unset MBEDTLS_HAVEGE_C # depends on timing.c
-scripts/config.pl unset MBEDTLS_THREADING_PTHREAD
-scripts/config.pl unset MBEDTLS_THREADING_C
-scripts/config.pl unset MBEDTLS_MEMORY_BACKTRACE # execinfo.h
-scripts/config.pl unset MBEDTLS_MEMORY_BUFFER_ALLOC_C # calls exit
-make CC=arm-none-eabi-gcc AR=arm-none-eabi-ar LD=arm-none-eabi-ld CFLAGS='-Werror -Wall -Wextra' lib
+component_test_have_int64 () {
+ msg "build: gcc, force 64-bit bignum limbs"
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ make CC=gcc CFLAGS='-Werror -Wall -Wextra -DMBEDTLS_HAVE_INT64'
-msg "build: arm-none-eabi-gcc -DMBEDTLS_NO_UDBL_DIVISION, make" # ~ 10s
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_NET_C
-scripts/config.pl unset MBEDTLS_TIMING_C
-scripts/config.pl unset MBEDTLS_FS_IO
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-scripts/config.pl set MBEDTLS_NO_PLATFORM_ENTROPY
-# following things are not in the default config
-scripts/config.pl unset MBEDTLS_HAVEGE_C # depends on timing.c
-scripts/config.pl unset MBEDTLS_THREADING_PTHREAD
-scripts/config.pl unset MBEDTLS_THREADING_C
-scripts/config.pl unset MBEDTLS_MEMORY_BACKTRACE # execinfo.h
-scripts/config.pl unset MBEDTLS_MEMORY_BUFFER_ALLOC_C # calls exit
-scripts/config.pl set MBEDTLS_NO_UDBL_DIVISION
-make CC=arm-none-eabi-gcc AR=arm-none-eabi-ar LD=arm-none-eabi-ld CFLAGS='-Werror -Wall -Wextra' lib
-echo "Checking that software 64-bit division is not required"
-! grep __aeabi_uldiv library/*.o
+ msg "test: gcc, force 64-bit bignum limbs"
+ make test
+}
-msg "build: ARM Compiler 5, make"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_NET_C
-scripts/config.pl unset MBEDTLS_TIMING_C
-scripts/config.pl unset MBEDTLS_FS_IO
-scripts/config.pl unset MBEDTLS_ENTROPY_NV_SEED
-scripts/config.pl unset MBEDTLS_HAVE_TIME
-scripts/config.pl unset MBEDTLS_HAVE_TIME_DATE
-scripts/config.pl set MBEDTLS_NO_PLATFORM_ENTROPY
-# following things are not in the default config
-scripts/config.pl unset MBEDTLS_DEPRECATED_WARNING
-scripts/config.pl unset MBEDTLS_HAVEGE_C # depends on timing.c
-scripts/config.pl unset MBEDTLS_THREADING_PTHREAD
-scripts/config.pl unset MBEDTLS_THREADING_C
-scripts/config.pl unset MBEDTLS_MEMORY_BACKTRACE # execinfo.h
-scripts/config.pl unset MBEDTLS_MEMORY_BUFFER_ALLOC_C # calls exit
-scripts/config.pl unset MBEDTLS_PLATFORM_TIME_ALT # depends on MBEDTLS_HAVE_TIME
+component_test_no_udbl_division () {
+ msg "build: MBEDTLS_NO_UDBL_DIVISION native" # ~ 10s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_NO_UDBL_DIVISION
+ make CFLAGS='-Werror -O1'
-if [ $RUN_ARMCC -ne 0 ]; then
+ msg "test: MBEDTLS_NO_UDBL_DIVISION native" # ~ 10s
+ make test
+}
+
+component_test_no_64bit_multiplication () {
+ msg "build: MBEDTLS_NO_64BIT_MULTIPLICATION native" # ~ 10s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_NO_64BIT_MULTIPLICATION
+ make CFLAGS='-Werror -O1'
+
+ msg "test: MBEDTLS_NO_64BIT_MULTIPLICATION native" # ~ 10s
+ make test
+}
+
+component_test_no_strings () {
+ msg "build: no strings" # ~10s
+ scripts/config.py full
+ # Disable options that activate a large amount of string constants.
+ scripts/config.py unset MBEDTLS_DEBUG_C
+ scripts/config.py unset MBEDTLS_ERROR_C
+ scripts/config.py set MBEDTLS_ERROR_STRERROR_DUMMY
+ scripts/config.py unset MBEDTLS_VERSION_FEATURES
+ make CFLAGS='-Werror -Os'
+
+ msg "test: no strings" # ~ 10s
+ make test
+}
+
+component_test_no_x509_info () {
+ msg "build: full + MBEDTLS_X509_REMOVE_INFO" # ~ 10s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BACKTRACE # too slow for tests
+ scripts/config.py set MBEDTLS_X509_REMOVE_INFO
+ make CFLAGS='-Werror -O1'
+
+ msg "test: full + MBEDTLS_X509_REMOVE_INFO" # ~ 10s
+ make test
+
+ msg "test: ssl-opt.sh, full + MBEDTLS_X509_REMOVE_INFO" # ~ 1 min
+ if_build_succeeded tests/ssl-opt.sh
+}
+
+component_build_arm_none_eabi_gcc () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -O1" # ~ 10s
+ scripts/config.py baremetal
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra -O1' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -O1"
+ ${ARM_NONE_EABI_GCC_PREFIX}size library/*.o
+}
+
+component_build_arm_none_eabi_gcc_arm5vte () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -march=armv5te" # ~ 10s
+ scripts/config.py baremetal
+ # Build for a target platform that's close to what Debian uses
+ # for its "armel" distribution (https://wiki.debian.org/ArmEabiPort).
+ # See https://github.com/ARMmbed/mbedtls/pull/2169 and comments.
+ # It would be better to build with arm-linux-gnueabi-gcc but
+ # we don't have that on our CI at this time.
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" CFLAGS='-std=c99 -Werror -Wall -Wextra -march=armv5te -O1' LDFLAGS='-march=armv5te' SHELL='sh -x' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -march=armv5te -O1"
+ ${ARM_NONE_EABI_GCC_PREFIX}size library/*.o
+}
+
+component_build_arm_none_eabi_gcc_m0plus () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -mthumb -mcpu=cortex-m0plus" # ~ 10s
+ scripts/config.py baremetal
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra -mthumb -mcpu=cortex-m0plus -Os' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -mthumb -mcpu=cortex-m0plus -Os"
+ ${ARM_NONE_EABI_GCC_PREFIX}size library/*.o
+}
+
+component_build_arm_none_eabi_gcc_no_udbl_division () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -DMBEDTLS_NO_UDBL_DIVISION, make" # ~ 10s
+ scripts/config.py baremetal
+ scripts/config.py set MBEDTLS_NO_UDBL_DIVISION
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra' lib
+ echo "Checking that software 64-bit division is not required"
+ if_build_succeeded not grep __aeabi_uldiv library/*.o
+}
+
+component_build_arm_none_eabi_gcc_no_64bit_multiplication () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc MBEDTLS_NO_64BIT_MULTIPLICATION, make" # ~ 10s
+ scripts/config.py baremetal
+ scripts/config.py set MBEDTLS_NO_64BIT_MULTIPLICATION
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -O1 -march=armv6-m -mthumb' lib
+ echo "Checking that software 64-bit multiplication is not required"
+ if_build_succeeded not grep __aeabi_lmul library/*.o
+}
+
+component_build_armcc () {
+ msg "build: ARM Compiler 5"
+ scripts/config.py baremetal
make CC="$ARMC5_CC" AR="$ARMC5_AR" WARNING_CFLAGS='--strict --c99' lib
+
+ msg "size: ARM Compiler 5"
+ "$ARMC5_FROMELF" -z library/*.o
+
make clean
# ARM Compiler 6 - Target ARMv7-A
@@ -766,47 +2434,40 @@
# ARM Compiler 6 - Target ARMv8-A - AArch64
armc6_build_test "--target=aarch64-arm-none-eabi -march=armv8.2-a"
-fi
+}
-msg "build: allow SHA1 in certificates by default"
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_TLS_DEFAULT_ALLOW_SHA1_IN_CERTIFICATES
-make CFLAGS='-Werror -Wall -Wextra'
-msg "test: allow SHA1 in certificates by default"
-make test
-if_build_succeeded tests/ssl-opt.sh -f SHA-1
+component_test_tls13_experimental () {
+ msg "build: default config with MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL enabled"
+ scripts/config.py set MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL
+ CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+ msg "test: default config with MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL enabled"
+ make test
+}
-msg "build: Default + MBEDTLS_RSA_NO_CRT (ASan build)" # ~ 6 min
-cleanup
-cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl set MBEDTLS_RSA_NO_CRT
-CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
-make
+component_build_mingw () {
+ msg "build: Windows cross build - mingw64, make (Link Library)" # ~ 30s
+ make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 lib programs
-msg "test: MBEDTLS_RSA_NO_CRT - main suites (inc. selftests) (ASan build)"
-make test
+ # note Make tests only builds the tests, but doesn't run them
+ make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror' WINDOWS_BUILD=1 tests
+ make WINDOWS_BUILD=1 clean
-msg "build: Windows cross build - mingw64, make (Link Library)" # ~ 30s
-cleanup
-make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-minggw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 lib programs
+ msg "build: Windows cross build - mingw64, make (DLL)" # ~ 30s
+ make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 SHARED=1 lib programs
+ make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 SHARED=1 tests
+ make WINDOWS_BUILD=1 clean
+}
+support_build_mingw () {
+ case $(i686-w64-mingw32-gcc -dumpversion) in
+ [0-5]*) false;;
+ *) true;;
+ esac
+}
-# note Make tests only builds the tests, but doesn't run them
-make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-minggw32-ld CFLAGS='-Werror' WINDOWS_BUILD=1 tests
-make WINDOWS_BUILD=1 clean
-
-msg "build: Windows cross build - mingw64, make (DLL)" # ~ 30s
-make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-minggw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 SHARED=1 lib programs
-make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-minggw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 SHARED=1 tests
-make WINDOWS_BUILD=1 clean
-
-# MemSan currently only available on Linux 64 bits
-if uname -a | grep 'Linux.*x86_64' >/dev/null; then
-
+component_test_memsan () {
msg "build: MSan (clang)" # ~ 1 min 20s
- cleanup
- cp "$CONFIG_H" "$CONFIG_BAK"
- scripts/config.pl unset MBEDTLS_AESNI_C # memsan doesn't grok asm
+ scripts/config.py unset MBEDTLS_AESNI_C # memsan doesn't grok asm
CC=clang cmake -D CMAKE_BUILD_TYPE:String=MemSan .
make
@@ -822,21 +2483,18 @@
msg "test: compat.sh (MSan)" # ~ 6 min 20s
if_build_succeeded tests/compat.sh
fi
+}
-else # no MemSan
-
+component_test_valgrind () {
msg "build: Release (clang)"
- cleanup
CC=clang cmake -D CMAKE_BUILD_TYPE:String=Release .
make
msg "test: main suites valgrind (Release)"
make memcheck
- # Optional part(s)
- # Currently broken, programs don't seem to receive signals
- # under valgrind on OS X
-
+ # Optional parts (slow; currently broken on OS X because programs don't
+ # seem to receive signals under valgrind on OS X).
if [ "$MEMORY" -gt 0 ]; then
msg "test: ssl-opt.sh --memcheck (Release)"
if_build_succeeded tests/ssl-opt.sh --memcheck
@@ -847,37 +2505,189 @@
if_build_succeeded tests/compat.sh --memcheck
fi
-fi # MemSan
+ if [ "$MEMORY" -gt 0 ]; then
+ msg "test: context-info.sh --memcheck (Release)"
+ if_build_succeeded tests/context-info.sh --memcheck
+ fi
+}
-msg "build: cmake 'out-of-source' build"
-cleanup
-MBEDTLS_ROOT_DIR="$PWD"
-mkdir "$OUT_OF_SOURCE_DIR"
-cd "$OUT_OF_SOURCE_DIR"
-cmake "$MBEDTLS_ROOT_DIR"
-make
+component_test_cmake_out_of_source () {
+ msg "build: cmake 'out-of-source' build"
+ MBEDTLS_ROOT_DIR="$PWD"
+ mkdir "$OUT_OF_SOURCE_DIR"
+ cd "$OUT_OF_SOURCE_DIR"
+ cmake "$MBEDTLS_ROOT_DIR"
+ make
-msg "test: cmake 'out-of-source' build"
-make test
-cd "$MBEDTLS_ROOT_DIR"
-rm -rf "$OUT_OF_SOURCE_DIR"
+ msg "test: cmake 'out-of-source' build"
+ make test
+ # Test an SSL option that requires an auxiliary script in test/scripts/.
+ # Also ensure that there are no error messages such as
+ # "No such file or directory", which would indicate that some required
+ # file is missing (ssl-opt.sh tolerates the absence of some files so
+ # may exit with status 0 but emit errors).
+ if_build_succeeded ./tests/ssl-opt.sh -f 'Fallback SCSV: beginning of list' 2>ssl-opt.err
+ if [ -s ssl-opt.err ]; then
+ cat ssl-opt.err >&2
+ record_status [ ! -s ssl-opt.err ]
+ rm ssl-opt.err
+ fi
+ cd "$MBEDTLS_ROOT_DIR"
+ rm -rf "$OUT_OF_SOURCE_DIR"
+ unset MBEDTLS_ROOT_DIR
+}
-for optimization_flag in -O2 -O3 -Ofast -Os; do
- for compiler in clang gcc; do
- msg "test: $compiler $optimization_flag, mbedtls_platform_zeroize()"
- cleanup
- CC="$compiler" DEBUG=1 CFLAGS="$optimization_flag" make programs
- gdb -x tests/scripts/test_zeroize.gdb -nw -batch -nx
+component_test_cmake_as_subdirectory () {
+ msg "build: cmake 'as-subdirectory' build"
+ MBEDTLS_ROOT_DIR="$PWD"
+
+ cd programs/test/cmake_subproject
+ cmake .
+ make
+ if_build_succeeded ./cmake_subproject
+
+ cd "$MBEDTLS_ROOT_DIR"
+ unset MBEDTLS_ROOT_DIR
+}
+
+component_test_cmake_as_package () {
+ msg "build: cmake 'as-package' build"
+ MBEDTLS_ROOT_DIR="$PWD"
+
+ cd programs/test/cmake_package
+ cmake .
+ make
+ if_build_succeeded ./cmake_package
+
+ cd "$MBEDTLS_ROOT_DIR"
+ unset MBEDTLS_ROOT_DIR
+}
+
+component_test_cmake_as_package_install () {
+ msg "build: cmake 'as-installed-package' build"
+ MBEDTLS_ROOT_DIR="$PWD"
+
+ cd programs/test/cmake_package_install
+ cmake .
+ make
+ if_build_succeeded ./cmake_package_install
+
+ cd "$MBEDTLS_ROOT_DIR"
+ unset MBEDTLS_ROOT_DIR
+}
+
+component_test_zeroize () {
+ # Test that the function mbedtls_platform_zeroize() is not optimized away by
+ # different combinations of compilers and optimization flags by using an
+ # auxiliary GDB script. Unfortunately, GDB does not return error values to the
+ # system in all cases that the script fails, so we must manually search the
+ # output to check whether the pass string is present and no failure strings
+ # were printed.
+
+ # Don't try to disable ASLR. We don't care about ASLR here. We do care
+ # about a spurious message if Gdb tries and fails, so suppress that.
+ gdb_disable_aslr=
+ if [ -z "$(gdb -batch -nw -ex 'set disable-randomization off' 2>&1)" ]; then
+ gdb_disable_aslr='set disable-randomization off'
+ fi
+
+ for optimization_flag in -O2 -O3 -Ofast -Os; do
+ for compiler in clang gcc; do
+ msg "test: $compiler $optimization_flag, mbedtls_platform_zeroize()"
+ make programs CC="$compiler" DEBUG=1 CFLAGS="$optimization_flag"
+ if_build_succeeded gdb -ex "$gdb_disable_aslr" -x tests/scripts/test_zeroize.gdb -nw -batch -nx 2>&1 | tee test_zeroize.log
+ if_build_succeeded grep "The buffer was correctly zeroized" test_zeroize.log
+ if_build_succeeded not grep -i "error" test_zeroize.log
+ rm -f test_zeroize.log
+ make clean
+ done
done
-done
+ unset gdb_disable_aslr
+}
+component_check_python_files () {
+ msg "Lint: Python scripts"
+ record_status tests/scripts/check-python-files.sh
+}
+
+component_check_generate_test_code () {
+ msg "unit test: generate_test_code.py"
+ # unittest writes out mundane stuff like the number of tests run on stderr.
+ # Our convention is to reserve stderr for actual errors, and write
+ # harmless info on stdout so it can be suppressed with --quiet.
+ record_status ./tests/scripts/test_generate_test_code.py 2>&1
+}
################################################################
#### Termination
################################################################
-msg "Done, cleaning up"
-cleanup
+post_report () {
+ msg "Done, cleaning up"
+ cleanup
-final_report
+ final_report
+}
+
+
+
+################################################################
+#### Run all the things
+################################################################
+
+# Run one component and clean up afterwards.
+run_component () {
+ # Back up the configuration in case the component modifies it.
+ # The cleanup function will restore it.
+ cp -p "$CONFIG_H" "$CONFIG_BAK"
+ cp -p "$CRYPTO_CONFIG_H" "$CRYPTO_CONFIG_BAK"
+ current_component="$1"
+ export MBEDTLS_TEST_CONFIGURATION="$current_component"
+
+ # Unconditionally create a seedfile that's sufficiently long.
+ # Do this before each component, because a previous component may
+ # have messed it up or shortened it.
+ redirect_err dd if=/dev/urandom of=./tests/seedfile bs=64 count=1
+
+ # Run the component code.
+ if [ $QUIET -eq 1 ]; then
+ # msg() is silenced, so just print the component name here
+ echo "${current_component#component_}"
+ fi
+ redirect_out "$@"
+
+ # Restore the build tree to a clean state.
+ cleanup
+ unset current_component
+}
+
+# Preliminary setup
+pre_check_environment
+pre_initialize_variables
+pre_parse_command_line "$@"
+
+pre_check_git
+
+build_status=0
+if [ $KEEP_GOING -eq 1 ]; then
+ pre_setup_keep_going
+else
+ record_status () {
+ "$@"
+ }
+fi
+pre_setup_quiet_redirect
+pre_prepare_outcome_file
+pre_print_configuration
+pre_check_tools
+cleanup
+pre_generate_files
+
+# Run the requested tests.
+for component in $RUN_COMPONENTS; do
+ run_component "component_$component"
+done
+
+# We're done.
+post_report
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
new file mode 100755
index 0000000..73f16bd
--- /dev/null
+++ b/tests/scripts/analyze_outcomes.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+
+"""Analyze the test outcomes from a full CI run.
+
+This script can also run on outcomes from a partial run, but the results are
+less likely to be useful.
+"""
+
+import argparse
+import re
+import sys
+import traceback
+
+import check_test_cases
+
+class Results:
+ """Process analysis results."""
+
+ def __init__(self):
+ self.error_count = 0
+ self.warning_count = 0
+
+ @staticmethod
+ def log(fmt, *args, **kwargs):
+ sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+
+ def error(self, fmt, *args, **kwargs):
+ self.log('Error: ' + fmt, *args, **kwargs)
+ self.error_count += 1
+
+ def warning(self, fmt, *args, **kwargs):
+ self.log('Warning: ' + fmt, *args, **kwargs)
+ self.warning_count += 1
+
+class TestCaseOutcomes:
+ """The outcomes of one test case across many configurations."""
+ # pylint: disable=too-few-public-methods
+
+ def __init__(self):
+ # Collect a list of witnesses of the test case succeeding or failing.
+ # Currently we don't do anything with witnesses except count them.
+ # The format of a witness is determined by the read_outcome_file
+ # function; it's the platform and configuration joined by ';'.
+ self.successes = []
+ self.failures = []
+
+ def hits(self):
+ """Return the number of times a test case has been run.
+
+ This includes passes and failures, but not skips.
+ """
+ return len(self.successes) + len(self.failures)
+
+class TestDescriptions(check_test_cases.TestDescriptionExplorer):
+ """Collect the available test cases."""
+
+ def __init__(self):
+ super().__init__()
+ self.descriptions = set()
+
+ def process_test_case(self, _per_file_state,
+ file_name, _line_number, description):
+ """Record an available test case."""
+ base_name = re.sub(r'\.[^.]*$', '', re.sub(r'.*/', '', file_name))
+ key = ';'.join([base_name, description.decode('utf-8')])
+ self.descriptions.add(key)
+
+def collect_available_test_cases():
+ """Collect the available test cases."""
+ explorer = TestDescriptions()
+ explorer.walk_all()
+ return sorted(explorer.descriptions)
+
+def analyze_coverage(results, outcomes):
+ """Check that all available test cases are executed at least once."""
+ available = collect_available_test_cases()
+ for key in available:
+ hits = outcomes[key].hits() if key in outcomes else 0
+ if hits == 0:
+ # Make this a warning, not an error, as long as we haven't
+ # fixed this branch to have full coverage of test cases.
+ results.warning('Test case not executed: {}', key)
+
+def analyze_outcomes(outcomes):
+ """Run all analyses on the given outcome collection."""
+ results = Results()
+ analyze_coverage(results, outcomes)
+ return results
+
+def read_outcome_file(outcome_file):
+ """Parse an outcome file and return an outcome collection.
+
+An outcome collection is a dictionary mapping keys to TestCaseOutcomes objects.
+The keys are the test suite name and the test case description, separated
+by a semicolon.
+"""
+ outcomes = {}
+ with open(outcome_file, 'r', encoding='utf-8') as input_file:
+ for line in input_file:
+ (platform, config, suite, case, result, _cause) = line.split(';')
+ key = ';'.join([suite, case])
+ setup = ';'.join([platform, config])
+ if key not in outcomes:
+ outcomes[key] = TestCaseOutcomes()
+ if result == 'PASS':
+ outcomes[key].successes.append(setup)
+ elif result == 'FAIL':
+ outcomes[key].failures.append(setup)
+ return outcomes
+
+def analyze_outcome_file(outcome_file):
+ """Analyze the given outcome file."""
+ outcomes = read_outcome_file(outcome_file)
+ return analyze_outcomes(outcomes)
+
+def main():
+ try:
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
+ help='Outcome file to analyze')
+ options = parser.parse_args()
+ results = analyze_outcome_file(options.outcomes)
+ if results.error_count > 0:
+ sys.exit(1)
+ except Exception: # pylint: disable=broad-except
+ # Print the backtrace and exit explicitly with our chosen status.
+ traceback.print_exc()
+ sys.exit(120)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/basic-build-test.sh b/tests/scripts/basic-build-test.sh
index b405871..cf68b4e 100755
--- a/tests/scripts/basic-build-test.sh
+++ b/tests/scripts/basic-build-test.sh
@@ -2,9 +2,20 @@
# basic-build-tests.sh
#
-# This file is part of mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2016, ARM Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -19,7 +30,7 @@
#
# The tests focus on functionality and do not consider performance.
#
-# Note the tests self-adapt due to configurations in include/mbedtls/config.h
+# Note the tests self-adapt due to configurations in include/mbedtls/mbedtls_config.h
# which can lead to some tests being skipped, and can cause the number of
# available tests to fluctuate.
#
@@ -43,13 +54,26 @@
: ${GNUTLS_LEGACY_CLI:="$GNUTLS_CLI"}
: ${GNUTLS_LEGACY_SERV:="$GNUTLS_SERV"}
+# Used to make ssl-opt.sh deterministic.
+#
+# See also RELEASE_SEED in all.sh. Debugging is easier if both values are kept
+# in sync. If you change the value here because it breaks some tests, you'll
+# definitely want to change it in all.sh as well.
+: ${SEED:=1}
+export SEED
+
+# if MAKEFLAGS is not set add the -j option to speed up invocations of make
+if [ -z "${MAKEFLAGS+set}" ]; then
+ export MAKEFLAGS="-j"
+fi
+
# To avoid setting OpenSSL and GnuTLS for each call to compat.sh and ssl-opt.sh
# we just export the variables they require
export OPENSSL_CMD="$OPENSSL"
export GNUTLS_CLI="$GNUTLS_CLI"
export GNUTLS_SERV="$GNUTLS_SERV"
-CONFIG_H='include/mbedtls/config.h'
+CONFIG_H='include/mbedtls/mbedtls_config.h'
CONFIG_BAK="$CONFIG_H.bak"
# Step 0 - print build environment info
@@ -64,40 +88,66 @@
# Step 1 - Make and instrumented build for code coverage
export CFLAGS=' --coverage -g3 -O0 '
+export LDFLAGS=' --coverage'
make clean
cp "$CONFIG_H" "$CONFIG_BAK"
-scripts/config.pl full
-scripts/config.pl unset MBEDTLS_MEMORY_BACKTRACE
-make -j
+scripts/config.py full
+make
# Step 2 - Execute the tests
TEST_OUTPUT=out_${PPID}
cd tests
-
-# Step 2a - Unit Tests
-perl scripts/run-test-suites.pl -v |tee unit-test-$TEST_OUTPUT
+if [ ! -f "seedfile" ]; then
+ dd if=/dev/urandom of="seedfile" bs=64 count=1
+fi
echo
-# Step 2b - System Tests
+# Step 2a - Unit Tests (keep going even if some tests fail)
+echo '################ Unit tests ################'
+perl scripts/run-test-suites.pl -v 2 |tee unit-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ Unit tests ^^^^^^^^^^^^^^^^'
+echo
+
+# Step 2b - System Tests (keep going even if some tests fail)
+echo
+echo '################ ssl-opt.sh ################'
+echo "ssl-opt.sh will use SEED=$SEED for udp_proxy"
sh ssl-opt.sh |tee sys-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ ssl-opt.sh ^^^^^^^^^^^^^^^^'
echo
-# Step 2c - Compatibility tests
-sh compat.sh -m 'tls1 tls1_1 tls1_2 dtls1 dtls1_2' | \
- tee compat-test-$TEST_OUTPUT
-OPENSSL_CMD="$OPENSSL_LEGACY" \
- sh compat.sh -m 'ssl3' |tee -a compat-test-$TEST_OUTPUT
-OPENSSL_CMD="$OPENSSL_LEGACY" \
- GNUTLS_CLI="$GNUTLS_LEGACY_CLI" \
- GNUTLS_SERV="$GNUTLS_LEGACY_SERV" \
- sh compat.sh -e '3DES\|DES-CBC3' -f 'NULL\|DES\|RC4\|ARCFOUR' | \
- tee -a compat-test-$TEST_OUTPUT
+# Step 2c - Compatibility tests (keep going even if some tests fail)
+echo '################ compat.sh ################'
+{
+ echo '#### compat.sh: Default versions'
+ sh compat.sh -m 'tls1_2 dtls1_2'
+ echo
+
+ echo '#### compat.sh: legacy (null, DES)'
+ OPENSSL_CMD="$OPENSSL_LEGACY" \
+ GNUTLS_CLI="$GNUTLS_LEGACY_CLI" GNUTLS_SERV="$GNUTLS_LEGACY_SERV" \
+ sh compat.sh -e '^$' -f 'NULL\|DES'
+ echo
+
+ echo '#### compat.sh: next (ARIA, ChaCha)'
+ OPENSSL_CMD="$OPENSSL_NEXT" sh compat.sh -e '^$' -f 'ARIA\|CHACHA'
+ echo
+} | tee compat-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ compat.sh ^^^^^^^^^^^^^^^^'
echo
# Step 3 - Process the coverage report
cd ..
-make lcov |tee tests/cov-$TEST_OUTPUT
+{
+ make lcov
+ echo SUCCESS
+} | tee tests/cov-$TEST_OUTPUT
+
+if [ "$(tail -n1 tests/cov-$TEST_OUTPUT)" != "SUCCESS" ]; then
+ echo >&2 "Fatal: 'make lcov' failed"
+ exit 2
+fi
# Step 4 - Summarise the test report
@@ -106,114 +156,134 @@
echo "Test Report Summary"
echo
-cd tests
+# A failure of the left-hand side of a pipe is ignored (this is a limitation
+# of sh). We'll use the presence of this file as a marker that the generation
+# of the report succeeded.
+rm -f "tests/basic-build-test-$$.ok"
-# Step 4a - Unit tests
-echo "Unit tests - tests/scripts/run-test-suites.pl"
+{
-PASSED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/test cases passed :[\t]*\([0-9]*\)/\1/p'| tr -d ' ')
-SKIPPED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/skipped :[ \t]*\([0-9]*\)/\1/p'| tr -d ' ')
-TOTAL_SUITES=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) .*, [0-9]* tests run)/\1/p'| tr -d ' ')
-FAILED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/failed :[\t]*\([0-9]*\)/\1/p' |tr -d ' ')
+ cd tests
-echo "No test suites : $TOTAL_SUITES"
-echo "Passed : $PASSED_TESTS"
-echo "Failed : $FAILED_TESTS"
-echo "Skipped : $SKIPPED_TESTS"
-echo "Total exec'd tests : $(($PASSED_TESTS + $FAILED_TESTS))"
-echo "Total avail tests : $(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))"
-echo
+ # Step 4a - Unit tests
+ echo "Unit tests - tests/scripts/run-test-suites.pl"
-TOTAL_PASS=$PASSED_TESTS
-TOTAL_FAIL=$FAILED_TESTS
-TOTAL_SKIP=$SKIPPED_TESTS
-TOTAL_AVAIL=$(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))
-TOTAL_EXED=$(($PASSED_TESTS + $FAILED_TESTS))
+ PASSED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/test cases passed :[\t]*\([0-9]*\)/\1/p'| tr -d ' ')
+ SKIPPED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/skipped :[ \t]*\([0-9]*\)/\1/p'| tr -d ' ')
+ TOTAL_SUITES=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) .*, [0-9]* tests run)/\1/p'| tr -d ' ')
+ FAILED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/failed :[\t]*\([0-9]*\)/\1/p' |tr -d ' ')
-# Step 4b - TLS Options tests
-echo "TLS Options tests - tests/ssl-opt.sh"
+ echo "No test suites : $TOTAL_SUITES"
+ echo "Passed : $PASSED_TESTS"
+ echo "Failed : $FAILED_TESTS"
+ echo "Skipped : $SKIPPED_TESTS"
+ echo "Total exec'd tests : $(($PASSED_TESTS + $FAILED_TESTS))"
+ echo "Total avail tests : $(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))"
+ echo
-PASSED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p')
-SKIPPED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p')
-TOTAL_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p')
-FAILED_TESTS=$(($TOTAL_TESTS - $PASSED_TESTS))
+ TOTAL_PASS=$PASSED_TESTS
+ TOTAL_FAIL=$FAILED_TESTS
+ TOTAL_SKIP=$SKIPPED_TESTS
+ TOTAL_AVAIL=$(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))
+ TOTAL_EXED=$(($PASSED_TESTS + $FAILED_TESTS))
-echo "Passed : $PASSED_TESTS"
-echo "Failed : $FAILED_TESTS"
-echo "Skipped : $SKIPPED_TESTS"
-echo "Total exec'd tests : $TOTAL_TESTS"
-echo "Total avail tests : $(($TOTAL_TESTS + $SKIPPED_TESTS))"
-echo
+ # Step 4b - TLS Options tests
+ echo "TLS Options tests - tests/ssl-opt.sh"
-TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
-TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
-TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
-TOTAL_AVAIL=$(($TOTAL_AVAIL + $TOTAL_TESTS + $SKIPPED_TESTS))
-TOTAL_EXED=$(($TOTAL_EXED + $TOTAL_TESTS))
+ PASSED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p')
+ SKIPPED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p')
+ TOTAL_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p')
+ FAILED_TESTS=$(($TOTAL_TESTS - $PASSED_TESTS))
+
+ echo "Passed : $PASSED_TESTS"
+ echo "Failed : $FAILED_TESTS"
+ echo "Skipped : $SKIPPED_TESTS"
+ echo "Total exec'd tests : $TOTAL_TESTS"
+ echo "Total avail tests : $(($TOTAL_TESTS + $SKIPPED_TESTS))"
+ echo
+
+ TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
+ TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
+ TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
+ TOTAL_AVAIL=$(($TOTAL_AVAIL + $TOTAL_TESTS + $SKIPPED_TESTS))
+ TOTAL_EXED=$(($TOTAL_EXED + $TOTAL_TESTS))
-# Step 4c - System Compatibility tests
-echo "System/Compatibility tests - tests/compat.sh"
+ # Step 4c - System Compatibility tests
+ echo "System/Compatibility tests - tests/compat.sh"
-PASSED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
-SKIPPED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
-EXED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
-FAILED_TESTS=$(($EXED_TESTS - $PASSED_TESTS))
+ PASSED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
+ SKIPPED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
+ EXED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
+ FAILED_TESTS=$(($EXED_TESTS - $PASSED_TESTS))
-echo "Passed : $PASSED_TESTS"
-echo "Failed : $FAILED_TESTS"
-echo "Skipped : $SKIPPED_TESTS"
-echo "Total exec'd tests : $EXED_TESTS"
-echo "Total avail tests : $(($EXED_TESTS + $SKIPPED_TESTS))"
-echo
+ echo "Passed : $PASSED_TESTS"
+ echo "Failed : $FAILED_TESTS"
+ echo "Skipped : $SKIPPED_TESTS"
+ echo "Total exec'd tests : $EXED_TESTS"
+ echo "Total avail tests : $(($EXED_TESTS + $SKIPPED_TESTS))"
+ echo
-TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
-TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
-TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
-TOTAL_AVAIL=$(($TOTAL_AVAIL + $EXED_TESTS + $SKIPPED_TESTS))
-TOTAL_EXED=$(($TOTAL_EXED + $EXED_TESTS))
+ TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
+ TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
+ TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
+ TOTAL_AVAIL=$(($TOTAL_AVAIL + $EXED_TESTS + $SKIPPED_TESTS))
+ TOTAL_EXED=$(($TOTAL_EXED + $EXED_TESTS))
-# Step 4d - Grand totals
-echo "-------------------------------------------------------------------------"
-echo "Total tests"
+ # Step 4d - Grand totals
+ echo "-------------------------------------------------------------------------"
+ echo "Total tests"
-echo "Total Passed : $TOTAL_PASS"
-echo "Total Failed : $TOTAL_FAIL"
-echo "Total Skipped : $TOTAL_SKIP"
-echo "Total exec'd tests : $TOTAL_EXED"
-echo "Total avail tests : $TOTAL_AVAIL"
-echo
+ echo "Total Passed : $TOTAL_PASS"
+ echo "Total Failed : $TOTAL_FAIL"
+ echo "Total Skipped : $TOTAL_SKIP"
+ echo "Total exec'd tests : $TOTAL_EXED"
+ echo "Total avail tests : $TOTAL_AVAIL"
+ echo
-# Step 4e - Coverage
-echo "Coverage"
+ # Step 4e - Coverage
+ echo "Coverage"
-LINES_TESTED=$(tail -n3 cov-$TEST_OUTPUT|sed -n -e 's/ lines......: [0-9]*.[0-9]% (\([0-9]*\) of [0-9]* lines)/\1/p')
-LINES_TOTAL=$(tail -n3 cov-$TEST_OUTPUT|sed -n -e 's/ lines......: [0-9]*.[0-9]% ([0-9]* of \([0-9]*\) lines)/\1/p')
-FUNCS_TESTED=$(tail -n3 cov-$TEST_OUTPUT|sed -n -e 's/ functions..: [0-9]*.[0-9]% (\([0-9]*\) of [0-9]* functions)$/\1/p')
-FUNCS_TOTAL=$(tail -n3 cov-$TEST_OUTPUT|sed -n -e 's/ functions..: [0-9]*.[0-9]% ([0-9]* of \([0-9]*\) functions)$/\1/p')
+ LINES_TESTED=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ lines......: [0-9]*.[0-9]% (\([0-9]*\) of [0-9]* lines)/\1/p')
+ LINES_TOTAL=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ lines......: [0-9]*.[0-9]% ([0-9]* of \([0-9]*\) lines)/\1/p')
+ FUNCS_TESTED=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ functions..: [0-9]*.[0-9]% (\([0-9]*\) of [0-9]* functions)$/\1/p')
+ FUNCS_TOTAL=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ functions..: [0-9]*.[0-9]% ([0-9]* of \([0-9]*\) functions)$/\1/p')
+ BRANCHES_TESTED=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ branches...: [0-9]*.[0-9]% (\([0-9]*\) of [0-9]* branches)$/\1/p')
+ BRANCHES_TOTAL=$(tail -n4 cov-$TEST_OUTPUT|sed -n -e 's/ branches...: [0-9]*.[0-9]% ([0-9]* of \([0-9]*\) branches)$/\1/p')
-LINES_PERCENT=$((1000*$LINES_TESTED/$LINES_TOTAL))
-LINES_PERCENT="$(($LINES_PERCENT/10)).$(($LINES_PERCENT-($LINES_PERCENT/10)*10))"
+ LINES_PERCENT=$((1000*$LINES_TESTED/$LINES_TOTAL))
+ LINES_PERCENT="$(($LINES_PERCENT/10)).$(($LINES_PERCENT-($LINES_PERCENT/10)*10))"
-FUNCS_PERCENT=$((1000*$FUNCS_TESTED/$FUNCS_TOTAL))
-FUNCS_PERCENT="$(($FUNCS_PERCENT/10)).$(($FUNCS_PERCENT-($FUNCS_PERCENT/10)*10))"
+ FUNCS_PERCENT=$((1000*$FUNCS_TESTED/$FUNCS_TOTAL))
+ FUNCS_PERCENT="$(($FUNCS_PERCENT/10)).$(($FUNCS_PERCENT-($FUNCS_PERCENT/10)*10))"
-echo "Lines Tested : $LINES_TESTED of $LINES_TOTAL $LINES_PERCENT%"
-echo "Functions Tested : $FUNCS_TESTED of $FUNCS_TOTAL $FUNCS_PERCENT%"
-echo
+ BRANCHES_PERCENT=$((1000*$BRANCHES_TESTED/$BRANCHES_TOTAL))
+ BRANCHES_PERCENT="$(($BRANCHES_PERCENT/10)).$(($BRANCHES_PERCENT-($BRANCHES_PERCENT/10)*10))"
+ rm unit-test-$TEST_OUTPUT
+ rm sys-test-$TEST_OUTPUT
+ rm compat-test-$TEST_OUTPUT
+ rm cov-$TEST_OUTPUT
-rm unit-test-$TEST_OUTPUT
-rm sys-test-$TEST_OUTPUT
-rm compat-test-$TEST_OUTPUT
-rm cov-$TEST_OUTPUT
+ echo "Lines Tested : $LINES_TESTED of $LINES_TOTAL $LINES_PERCENT%"
+ echo "Functions Tested : $FUNCS_TESTED of $FUNCS_TOTAL $FUNCS_PERCENT%"
+ echo "Branches Tested : $BRANCHES_TESTED of $BRANCHES_TOTAL $BRANCHES_PERCENT%"
+ echo
-cd ..
+ # Mark the report generation as having succeeded. This must be the
+ # last thing in the report generation.
+ touch "basic-build-test-$$.ok"
+} | tee coverage-summary.txt
make clean
if [ -f "$CONFIG_BAK" ]; then
mv "$CONFIG_BAK" "$CONFIG_H"
fi
+
+# The file must exist, otherwise it means something went wrong while generating
+# the coverage report. If something did go wrong, rm will complain so this
+# script will exit with a failure status.
+rm "tests/basic-build-test-$$.ok"
diff --git a/tests/scripts/basic-in-docker.sh b/tests/scripts/basic-in-docker.sh
new file mode 100755
index 0000000..50bcb05
--- /dev/null
+++ b/tests/scripts/basic-in-docker.sh
@@ -0,0 +1,45 @@
+#!/bin/bash -eu
+
+# basic-in-docker.sh
+#
+# Purpose
+# -------
+# This runs sanity checks and library tests in a Docker container. The tests
+# are run for both clang and gcc. The testing includes a full test run
+# in the default configuration, partial test runs in the reference
+# configurations, and some dependency tests.
+#
+# Notes for users
+# ---------------
+# See docker_env.sh for prerequisites and other information.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tests/scripts/docker_env.sh
+
+run_in_docker tests/scripts/all.sh 'check_*'
+
+for compiler in clang gcc; do
+ run_in_docker -e CC=${compiler} cmake -D CMAKE_BUILD_TYPE:String="Check" .
+ run_in_docker -e CC=${compiler} make
+ run_in_docker -e CC=${compiler} make test
+ run_in_docker programs/test/selftest
+ run_in_docker -e OSSL_NO_DTLS=1 tests/compat.sh
+ run_in_docker tests/ssl-opt.sh -e '\(DTLS\|SCSV\).*openssl'
+ run_in_docker tests/scripts/test-ref-configs.pl
+ run_in_docker tests/scripts/curves.pl
+ run_in_docker tests/scripts/key-exchanges.pl
+done
diff --git a/tests/scripts/check-doxy-blocks.pl b/tests/scripts/check-doxy-blocks.pl
index b0fd696..3ed7069 100755
--- a/tests/scripts/check-doxy-blocks.pl
+++ b/tests/scripts/check-doxy-blocks.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Detect comment blocks that are likely meant to be doxygen blocks but aren't.
#
@@ -7,6 +7,21 @@
# sed -e '/EXTRACT/s/YES/NO/' doxygen/mbedtls.doxyfile | doxygen -
# but that would warn about any undocumented item, while our goal is to find
# items that are documented, but not marked as such by mistake.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use warnings;
use strict;
diff --git a/tests/scripts/check-generated-files.sh b/tests/scripts/check-generated-files.sh
index 0400bc7..a2c285f 100755
--- a/tests/scripts/check-generated-files.sh
+++ b/tests/scripts/check-generated-files.sh
@@ -1,24 +1,117 @@
-#!/bin/sh
+#! /usr/bin/env sh
-# check if generated files are up-to-date
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Purpose
+#
+# Check if generated files are up-to-date.
set -eu
+if [ $# -ne 0 ] && [ "$1" = "--help" ]; then
+ cat <<EOF
+$0 [-u]
+This script checks that all generated files are up-to-date. If some aren't, by
+default the script reports it and exits in error; with the -u option, it just
+updates them instead.
+
+ -u Update the files rather than return an error for out-of-date files.
+EOF
+ exit
+fi
+
if [ -d library -a -d include -a -d tests ]; then :; else
echo "Must be run from mbed TLS root" >&2
exit 1
fi
+UPDATE=
+if [ $# -ne 0 ] && [ "$1" = "-u" ]; then
+ shift
+ UPDATE='y'
+fi
+
+# check SCRIPT FILENAME[...]
+# check SCRIPT DIRECTORY
+# Run SCRIPT and check that it does not modify any of the specified files.
+# In the first form, there can be any number of FILENAMEs, which must be
+# regular files.
+# In the second form, there must be a single DIRECTORY, standing for the
+# list of files in the directory. Running SCRIPT must not modify any file
+# in the directory and must not add or remove files either.
+# If $UPDATE is empty, abort with an error status if a file is modified.
check()
{
- FILE=$1
- SCRIPT=$2
+ SCRIPT=$1
+ shift
- cp $FILE $FILE.bak
- $SCRIPT
- diff $FILE $FILE.bak
- mv $FILE.bak $FILE
+ directory=
+ if [ -d "$1" ]; then
+ directory="$1"
+ rm -f "$directory"/*.bak
+ set -- "$1"/*
+ fi
+
+ for FILE in "$@"; do
+ if [ -e "$FILE" ]; then
+ cp "$FILE" "$FILE.bak"
+ else
+ rm -f "$FILE.bak"
+ fi
+ done
+
+ "$SCRIPT"
+
+ # Compare the script output to the old files and remove backups
+ for FILE in "$@"; do
+ if ! diff "$FILE" "$FILE.bak" >/dev/null 2>&1; then
+ echo "'$FILE' was either modified or deleted by '$SCRIPT'"
+ if [ -z "$UPDATE" ]; then
+ exit 1
+ fi
+ fi
+ if [ -z "$UPDATE" ]; then
+ mv "$FILE.bak" "$FILE"
+ else
+ rm -f "$FILE.bak"
+ fi
+ done
+
+ if [ -n "$directory" ]; then
+ old_list="$*"
+ set -- "$directory"/*
+ new_list="$*"
+ # Check if there are any new files
+ if [ "$old_list" != "$new_list" ]; then
+ echo "Files were deleted or created by '$SCRIPT'"
+ echo "Before: $old_list"
+ echo "After: $new_list"
+ if [ -z "$UPDATE" ]; then
+ exit 1
+ fi
+ fi
+ fi
}
-check library/error.c scripts/generate_errors.pl
-check library/version_features.c scripts/generate_features.pl
+check scripts/generate_errors.pl library/error.c
+check scripts/generate_query_config.pl programs/test/query_config.c
+check scripts/generate_features.pl library/version_features.c
+# generate_visualc_files enumerates source files (library/*.c). It doesn't
+# care about their content, but the files must exist. So it must run after
+# the step that creates or updates these files.
+check scripts/generate_visualc_files.pl visualc/VS2010
+check scripts/generate_psa_constants.py programs/psa/psa_constant_names_generated.c
+check tests/scripts/generate_psa_tests.py $(tests/scripts/generate_psa_tests.py --list)
diff --git a/tests/scripts/check-python-files.sh b/tests/scripts/check-python-files.sh
new file mode 100755
index 0000000..449803a5
--- /dev/null
+++ b/tests/scripts/check-python-files.sh
@@ -0,0 +1,83 @@
+#! /usr/bin/env sh
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Purpose: check Python files for potential programming errors or maintenance
+# hurdles. Run pylint to detect some potential mistakes and enforce PEP8
+# coding standards. If available, run mypy to perform static type checking.
+
+# We'll keep going on errors and report the status at the end.
+ret=0
+
+if type python3 >/dev/null 2>/dev/null; then
+ PYTHON=python3
+else
+ PYTHON=python
+fi
+
+check_version () {
+ $PYTHON - "$2" <<EOF
+import packaging.version
+import sys
+import $1 as package
+actual = package.__version__
+wanted = sys.argv[1]
+if packaging.version.parse(actual) < packaging.version.parse(wanted):
+ sys.stderr.write("$1: version %s is too old (want %s)\n" % (actual, wanted))
+ exit(1)
+EOF
+}
+
+can_pylint () {
+ # Pylint 1.5.2 from Ubuntu 16.04 is too old:
+ # E: 34, 0: Unable to import 'mbedtls_dev' (import-error)
+ # Pylint 1.8.3 from Ubuntu 18.04 passed on the first commit containing this line.
+ check_version pylint 1.8.3
+}
+
+can_mypy () {
+ # mypy 0.770 is too old:
+ # tests/scripts/test_psa_constant_names.py:34: error: Cannot find implementation or library stub for module named 'mbedtls_dev'
+ # mypy 0.780 from pip passed on the first commit containing this line.
+ check_version mypy.version 0.780
+}
+
+# With just a --can-xxx option, check whether the tool for xxx is available
+# with an acceptable version, and exit without running any checks. The exit
+# status is true if the tool is available and acceptable and false otherwise.
+if [ "$1" = "--can-pylint" ]; then
+ can_pylint
+ exit
+elif [ "$1" = "--can-mypy" ]; then
+ can_mypy
+ exit
+fi
+
+echo 'Running pylint ...'
+$PYTHON -m pylint -j 2 scripts/mbedtls_dev/*.py scripts/*.py tests/scripts/*.py || {
+ echo >&2 "pylint reported errors"
+ ret=1
+}
+
+# Check types if mypy is available
+if can_mypy; then
+ echo
+ echo 'Running mypy ...'
+ $PYTHON -m mypy scripts/*.py tests/scripts/*.py ||
+ ret=1
+fi
+
+exit $ret
diff --git a/tests/scripts/check_files.py b/tests/scripts/check_files.py
new file mode 100755
index 0000000..8857e00
--- /dev/null
+++ b/tests/scripts/check_files.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python3
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script checks the current state of the source code for minor issues,
+including incorrect file permissions, presence of tabs, non-Unix line endings,
+trailing whitespace, and presence of UTF-8 BOM.
+Note: requires python 3, must be run from Mbed TLS root.
+"""
+
+import os
+import argparse
+import logging
+import codecs
+import re
+import subprocess
+import sys
+try:
+ from typing import FrozenSet, Optional, Pattern # pylint: disable=unused-import
+except ImportError:
+ pass
+
+
+class FileIssueTracker:
+ """Base class for file-wide issue tracking.
+
+ To implement a checker that processes a file as a whole, inherit from
+ this class and implement `check_file_for_issue` and define ``heading``.
+
+ ``suffix_exemptions``: files whose name ends with a string in this set
+ will not be checked.
+
+ ``path_exemptions``: files whose path (relative to the root of the source
+ tree) matches this regular expression will not be checked. This can be
+ ``None`` to match no path. Paths are normalized and converted to ``/``
+ separators before matching.
+
+ ``heading``: human-readable description of the issue
+ """
+
+ suffix_exemptions = frozenset() #type: FrozenSet[str]
+ path_exemptions = None #type: Optional[Pattern[str]]
+ # heading must be defined in derived classes.
+ # pylint: disable=no-member
+
+ def __init__(self):
+ self.files_with_issues = {}
+
+ @staticmethod
+ def normalize_path(filepath):
+ """Normalize ``filepath`` with / as the directory separator."""
+ filepath = os.path.normpath(filepath)
+ # On Windows, we may have backslashes to separate directories.
+ # We need slashes to match exemption lists.
+ seps = os.path.sep
+ if os.path.altsep is not None:
+ seps += os.path.altsep
+ return '/'.join(filepath.split(seps))
+
+ def should_check_file(self, filepath):
+ """Whether the given file name should be checked.
+
+ Files whose name ends with a string listed in ``self.suffix_exemptions``
+ or whose path matches ``self.path_exemptions`` will not be checked.
+ """
+ for files_exemption in self.suffix_exemptions:
+ if filepath.endswith(files_exemption):
+ return False
+ if self.path_exemptions and \
+ re.match(self.path_exemptions, self.normalize_path(filepath)):
+ return False
+ return True
+
+ def check_file_for_issue(self, filepath):
+ """Check the specified file for the issue that this class is for.
+
+ Subclasses must implement this method.
+ """
+ raise NotImplementedError
+
+ def record_issue(self, filepath, line_number):
+ """Record that an issue was found at the specified location."""
+ if filepath not in self.files_with_issues.keys():
+ self.files_with_issues[filepath] = []
+ self.files_with_issues[filepath].append(line_number)
+
+ def output_file_issues(self, logger):
+ """Log all the locations where the issue was found."""
+ if self.files_with_issues.values():
+ logger.info(self.heading)
+ for filename, lines in sorted(self.files_with_issues.items()):
+ if lines:
+ logger.info("{}: {}".format(
+ filename, ", ".join(str(x) for x in lines)
+ ))
+ else:
+ logger.info(filename)
+ logger.info("")
+
+BINARY_FILE_PATH_RE_LIST = [
+ r'docs/.*\.pdf\Z',
+ r'programs/fuzz/corpuses/[^.]+\Z',
+ r'tests/data_files/[^.]+\Z',
+ r'tests/data_files/.*\.(crt|csr|db|der|key|pubkey)\Z',
+ r'tests/data_files/.*\.req\.[^/]+\Z',
+ r'tests/data_files/.*malformed[^/]+\Z',
+ r'tests/data_files/format_pkcs12\.fmt\Z',
+]
+BINARY_FILE_PATH_RE = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST))
+
+class LineIssueTracker(FileIssueTracker):
+ """Base class for line-by-line issue tracking.
+
+ To implement a checker that processes files line by line, inherit from
+ this class and implement `line_with_issue`.
+ """
+
+ # Exclude binary files.
+ path_exemptions = BINARY_FILE_PATH_RE
+
+ def issue_with_line(self, line, filepath):
+ """Check the specified line for the issue that this class is for.
+
+ Subclasses must implement this method.
+ """
+ raise NotImplementedError
+
+ def check_file_line(self, filepath, line, line_number):
+ if self.issue_with_line(line, filepath):
+ self.record_issue(filepath, line_number)
+
+ def check_file_for_issue(self, filepath):
+ """Check the lines of the specified file.
+
+ Subclasses must implement the ``issue_with_line`` method.
+ """
+ with open(filepath, "rb") as f:
+ for i, line in enumerate(iter(f.readline, b"")):
+ self.check_file_line(filepath, line, i + 1)
+
+
+def is_windows_file(filepath):
+ _root, ext = os.path.splitext(filepath)
+ return ext in ('.bat', '.dsp', '.dsw', '.sln', '.vcxproj')
+
+
+class PermissionIssueTracker(FileIssueTracker):
+ """Track files with bad permissions.
+
+    Files must be executable if and only if they are scripts (.sh or .pl)."""
+
+ heading = "Incorrect permissions:"
+
+ # .py files can be either full scripts or modules, so they may or may
+ # not be executable.
+ suffix_exemptions = frozenset({".py"})
+
+ def check_file_for_issue(self, filepath):
+ is_executable = os.access(filepath, os.X_OK)
+ should_be_executable = filepath.endswith((".sh", ".pl"))
+ if is_executable != should_be_executable:
+ self.files_with_issues[filepath] = None
+
+
+class ShebangIssueTracker(FileIssueTracker):
+ """Track files with a bad, missing or extraneous shebang line.
+
+ Executable scripts must start with a valid shebang (#!) line.
+ """
+
+ heading = "Invalid shebang line:"
+
+ # Allow either /bin/sh, /bin/bash, or /usr/bin/env.
+ # Allow at most one argument (this is a Linux limitation).
+ # For sh and bash, the argument if present must be options.
+    # For env, the argument must be the base name of the interpreter.
+ _shebang_re = re.compile(rb'^#! ?(?:/bin/(bash|sh)(?: -[^\n ]*)?'
+ rb'|/usr/bin/env ([^\n /]+))$')
+ _extensions = {
+ b'bash': 'sh',
+ b'perl': 'pl',
+ b'python3': 'py',
+ b'sh': 'sh',
+ }
+
+ def is_valid_shebang(self, first_line, filepath):
+ m = re.match(self._shebang_re, first_line)
+ if not m:
+ return False
+ interpreter = m.group(1) or m.group(2)
+ if interpreter not in self._extensions:
+ return False
+ if not filepath.endswith('.' + self._extensions[interpreter]):
+ return False
+ return True
+
+ def check_file_for_issue(self, filepath):
+ is_executable = os.access(filepath, os.X_OK)
+ with open(filepath, "rb") as f:
+ first_line = f.readline()
+ if first_line.startswith(b'#!'):
+ if not is_executable:
+ # Shebang on a non-executable file
+ self.files_with_issues[filepath] = None
+ elif not self.is_valid_shebang(first_line, filepath):
+ self.files_with_issues[filepath] = [1]
+ elif is_executable:
+ # Executable without a shebang
+ self.files_with_issues[filepath] = None
+
+
+class EndOfFileNewlineIssueTracker(FileIssueTracker):
+ """Track files that end with an incomplete line
+ (no newline character at the end of the last line)."""
+
+ heading = "Missing newline at end of file:"
+
+ path_exemptions = BINARY_FILE_PATH_RE
+
+ def check_file_for_issue(self, filepath):
+ with open(filepath, "rb") as f:
+ try:
+ f.seek(-1, 2)
+ except OSError:
+ # This script only works on regular files. If we can't seek
+ # 1 before the end, it means that this position is before
+ # the beginning of the file, i.e. that the file is empty.
+ return
+ if f.read(1) != b"\n":
+ self.files_with_issues[filepath] = None
+
+
+class Utf8BomIssueTracker(FileIssueTracker):
+ """Track files that start with a UTF-8 BOM.
+ Files should be ASCII or UTF-8. Valid UTF-8 does not start with a BOM."""
+
+ heading = "UTF-8 BOM present:"
+
+ suffix_exemptions = frozenset([".vcxproj", ".sln"])
+ path_exemptions = BINARY_FILE_PATH_RE
+
+ def check_file_for_issue(self, filepath):
+ with open(filepath, "rb") as f:
+ if f.read().startswith(codecs.BOM_UTF8):
+ self.files_with_issues[filepath] = None
+
+
+class UnixLineEndingIssueTracker(LineIssueTracker):
+ """Track files with non-Unix line endings (i.e. files with CR)."""
+
+ heading = "Non-Unix line endings:"
+
+ def should_check_file(self, filepath):
+ if not super().should_check_file(filepath):
+ return False
+ return not is_windows_file(filepath)
+
+ def issue_with_line(self, line, _filepath):
+ return b"\r" in line
+
+
+class WindowsLineEndingIssueTracker(LineIssueTracker):
+ """Track files with non-Windows line endings (i.e. CR or LF not in CRLF)."""
+
+ heading = "Non-Windows line endings:"
+
+ def should_check_file(self, filepath):
+ if not super().should_check_file(filepath):
+ return False
+ return is_windows_file(filepath)
+
+ def issue_with_line(self, line, _filepath):
+ return not line.endswith(b"\r\n") or b"\r" in line[:-2]
+
+
+class TrailingWhitespaceIssueTracker(LineIssueTracker):
+ """Track lines with trailing whitespace."""
+
+ heading = "Trailing whitespace:"
+ suffix_exemptions = frozenset([".dsp", ".md"])
+
+ def issue_with_line(self, line, _filepath):
+ return line.rstrip(b"\r\n") != line.rstrip()
+
+
+class TabIssueTracker(LineIssueTracker):
+ """Track lines with tabs."""
+
+ heading = "Tabs present:"
+ suffix_exemptions = frozenset([
+ ".pem", # some openssl dumps have tabs
+ ".sln",
+ "/Makefile",
+ "/Makefile.inc",
+ "/generate_visualc_files.pl",
+ ])
+
+ def issue_with_line(self, line, _filepath):
+ return b"\t" in line
+
+
+class MergeArtifactIssueTracker(LineIssueTracker):
+ """Track lines with merge artifacts.
+ These are leftovers from a ``git merge`` that wasn't fully edited."""
+
+ heading = "Merge artifact:"
+
+ def issue_with_line(self, line, _filepath):
+ # Detect leftover git conflict markers.
+ if line.startswith(b'<<<<<<< ') or line.startswith(b'>>>>>>> '):
+ return True
+ if line.startswith(b'||||||| '): # from merge.conflictStyle=diff3
+ return True
+ if line.rstrip(b'\r\n') == b'=======' and \
+ not _filepath.endswith('.md'):
+ return True
+ return False
+
+
+class IntegrityChecker:
+ """Sanity-check files under the current directory."""
+
+ def __init__(self, log_file):
+ """Instantiate the sanity checker.
+ Check files under the current directory.
+ Write a report of issues to log_file."""
+ self.check_repo_path()
+ self.logger = None
+ self.setup_logger(log_file)
+ self.issues_to_check = [
+ PermissionIssueTracker(),
+ ShebangIssueTracker(),
+ EndOfFileNewlineIssueTracker(),
+ Utf8BomIssueTracker(),
+ UnixLineEndingIssueTracker(),
+ WindowsLineEndingIssueTracker(),
+ TrailingWhitespaceIssueTracker(),
+ TabIssueTracker(),
+ MergeArtifactIssueTracker(),
+ ]
+
+ @staticmethod
+ def check_repo_path():
+ if not all(os.path.isdir(d) for d in ["include", "library", "tests"]):
+ raise Exception("Must be run from Mbed TLS root")
+
+ def setup_logger(self, log_file, level=logging.INFO):
+ self.logger = logging.getLogger()
+ self.logger.setLevel(level)
+ if log_file:
+ handler = logging.FileHandler(log_file)
+ self.logger.addHandler(handler)
+ else:
+ console = logging.StreamHandler()
+ self.logger.addHandler(console)
+
+ @staticmethod
+ def collect_files():
+ bytes_output = subprocess.check_output(['git', 'ls-files', '-z'])
+ bytes_filepaths = bytes_output.split(b'\0')[:-1]
+ ascii_filepaths = map(lambda fp: fp.decode('ascii'), bytes_filepaths)
+ # Prepend './' to files in the top-level directory so that
+ # something like `'/Makefile' in fp` matches in the top-level
+ # directory as well as in subdirectories.
+ return [fp if os.path.dirname(fp) else os.path.join(os.curdir, fp)
+ for fp in ascii_filepaths]
+
+ def check_files(self):
+ for issue_to_check in self.issues_to_check:
+ for filepath in self.collect_files():
+ if issue_to_check.should_check_file(filepath):
+ issue_to_check.check_file_for_issue(filepath)
+
+ def output_issues(self):
+ integrity_return_code = 0
+ for issue_to_check in self.issues_to_check:
+ if issue_to_check.files_with_issues:
+ integrity_return_code = 1
+ issue_to_check.output_file_issues(self.logger)
+ return integrity_return_code
+
+
+def run_main():
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument(
+ "-l", "--log_file", type=str, help="path to optional output log",
+ )
+ check_args = parser.parse_args()
+ integrity_check = IntegrityChecker(check_args.log_file)
+ integrity_check.check_files()
+ return_code = integrity_check.output_issues()
+ sys.exit(return_code)
+
+
+if __name__ == "__main__":
+ run_main()
diff --git a/tests/scripts/check_test_cases.py b/tests/scripts/check_test_cases.py
new file mode 100755
index 0000000..fe11f20
--- /dev/null
+++ b/tests/scripts/check_test_cases.py
@@ -0,0 +1,192 @@
+#!/usr/bin/env python3
+
+"""Sanity checks for test data.
+
+This program contains a class for traversing test cases that can be used
+independently of the checks.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import glob
+import os
+import re
+import sys
+
+class Results:
+ """Store file and line information about errors or warnings in test suites."""
+
+ def __init__(self, options):
+ self.errors = 0
+ self.warnings = 0
+ self.ignore_warnings = options.quiet
+
+ def error(self, file_name, line_number, fmt, *args):
+ sys.stderr.write(('{}:{}:ERROR:' + fmt + '\n').
+ format(file_name, line_number, *args))
+ self.errors += 1
+
+ def warning(self, file_name, line_number, fmt, *args):
+ if not self.ignore_warnings:
+ sys.stderr.write(('{}:{}:Warning:' + fmt + '\n')
+ .format(file_name, line_number, *args))
+ self.warnings += 1
+
+class TestDescriptionExplorer:
+ """An iterator over test cases with descriptions.
+
+The test cases that have descriptions are:
+* Individual unit tests (entries in a .data file) in test suites.
+* Individual test cases in ssl-opt.sh.
+
+This is an abstract class. To use it, derive a class that implements
+the process_test_case method, and call walk_all().
+"""
+
+ def process_test_case(self, per_file_state,
+ file_name, line_number, description):
+ """Process a test case.
+
+per_file_state: an object created by new_per_file_state() at the beginning
+ of each file.
+file_name: a relative path to the file containing the test case.
+line_number: the line number in the given file.
+description: the test case description as a byte string.
+"""
+ raise NotImplementedError
+
+ def new_per_file_state(self):
+ """Return a new per-file state object.
+
+The default per-file state object is None. Child classes that require per-file
+state may override this method.
+"""
+ #pylint: disable=no-self-use
+ return None
+
+ def walk_test_suite(self, data_file_name):
+ """Iterate over the test cases in the given unit test data file."""
+ in_paragraph = False
+ descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+ with open(data_file_name, 'rb') as data_file:
+ for line_number, line in enumerate(data_file, 1):
+ line = line.rstrip(b'\r\n')
+ if not line:
+ in_paragraph = False
+ continue
+ if line.startswith(b'#'):
+ continue
+ if not in_paragraph:
+ # This is a test case description line.
+ self.process_test_case(descriptions,
+ data_file_name, line_number, line)
+ in_paragraph = True
+
+ def walk_ssl_opt_sh(self, file_name):
+ """Iterate over the test cases in ssl-opt.sh or a file with a similar format."""
+ descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+ with open(file_name, 'rb') as file_contents:
+ for line_number, line in enumerate(file_contents, 1):
+ # Assume that all run_test calls have the same simple form
+ # with the test description entirely on the same line as the
+ # function name.
+ m = re.match(br'\s*run_test\s+"((?:[^\\"]|\\.)*)"', line)
+ if not m:
+ continue
+ description = m.group(1)
+ self.process_test_case(descriptions,
+ file_name, line_number, description)
+
+ @staticmethod
+ def collect_test_directories():
+ """Get the relative path for the TLS and Crypto test directories."""
+ if os.path.isdir('tests'):
+ tests_dir = 'tests'
+ elif os.path.isdir('suites'):
+ tests_dir = '.'
+ elif os.path.isdir('../suites'):
+ tests_dir = '..'
+ directories = [tests_dir]
+ return directories
+
+ def walk_all(self):
+ """Iterate over all named test cases."""
+ test_directories = self.collect_test_directories()
+ for directory in test_directories:
+ for data_file_name in glob.glob(os.path.join(directory, 'suites',
+ '*.data')):
+ self.walk_test_suite(data_file_name)
+ ssl_opt_sh = os.path.join(directory, 'ssl-opt.sh')
+ if os.path.exists(ssl_opt_sh):
+ self.walk_ssl_opt_sh(ssl_opt_sh)
+
+class DescriptionChecker(TestDescriptionExplorer):
+ """Check all test case descriptions.
+
+* Check that each description is valid (length, allowed character set, etc.).
+* Check that there is no duplicated description inside of one test suite.
+"""
+
+ def __init__(self, results):
+ self.results = results
+
+ def new_per_file_state(self):
+ """Dictionary mapping descriptions to their line number."""
+ return {}
+
+ def process_test_case(self, per_file_state,
+ file_name, line_number, description):
+ """Check test case descriptions for errors."""
+ results = self.results
+ seen = per_file_state
+ if description in seen:
+ results.error(file_name, line_number,
+ 'Duplicate description (also line {})',
+ seen[description])
+ return
+ if re.search(br'[\t;]', description):
+ results.error(file_name, line_number,
+ 'Forbidden character \'{}\' in description',
+ re.search(br'[\t;]', description).group(0).decode('ascii'))
+ if re.search(br'[^ -~]', description):
+ results.error(file_name, line_number,
+ 'Non-ASCII character in description')
+ if len(description) > 66:
+ results.warning(file_name, line_number,
+ 'Test description too long ({} > 66)',
+ len(description))
+ seen[description] = line_number
+
+def main():
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--quiet', '-q',
+ action='store_true',
+ help='Hide warnings')
+ parser.add_argument('--verbose', '-v',
+ action='store_false', dest='quiet',
+ help='Show warnings (default: on; undoes --quiet)')
+ options = parser.parse_args()
+ results = Results(options)
+ checker = DescriptionChecker(results)
+ checker.walk_all()
+ if (results.warnings or results.errors) and not options.quiet:
+ sys.stderr.write('{}: {} errors, {} warnings\n'
+ .format(sys.argv[0], results.errors, results.warnings))
+ sys.exit(1 if results.errors else 0)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/curves.pl b/tests/scripts/curves.pl
index 0041814..47196f4 100755
--- a/tests/scripts/curves.pl
+++ b/tests/scripts/curves.pl
@@ -1,26 +1,43 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# curves.pl
#
-# Copyright (c) 2014-2016, ARM Limited, All Rights Reserved
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
-# To test the code dependencies on individual curves in each test suite. This
-# is a verification step to ensure we don't ship test suites that do not work
-# for some build options.
+# The purpose of this test script is to validate that the library works
+# with any combination of elliptic curves. To this effect, build the library
+# and run the test suite with each tested combination of elliptic curves.
#
-# The process is:
-# for each possible curve
-# build the library and test suites with the curve disabled
-# execute the test suites
+# Testing all 2^n combinations would be too much, so we only test 2*n:
#
-# And any test suite with the wrong dependencies will fail.
-#
+# 1. Test with a single curve, for each curve. This validates that the
+# library works with any curve, and in particular that curve-specific
+# code is guarded by the proper preprocessor conditionals.
+# 2. Test with all curves except one, for each curve. This validates that
+# the test cases have correct dependencies. Testing with a single curve
+# doesn't validate this for tests that require more than one curve.
+
# Usage: tests/scripts/curves.pl
#
# This script should be executed from the root of the project directory.
#
+# Only curves that are enabled in mbedtls_config.h will be tested.
+#
# For best effect, run either with cmake disabled, or cmake enabled in a mode
# that includes -Werror.
@@ -30,9 +47,28 @@
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";
my $sed_cmd = 's/^#define \(MBEDTLS_ECP_DP.*_ENABLED\)/\1/p';
-my $config_h = 'include/mbedtls/config.h';
+my $config_h = 'include/mbedtls/mbedtls_config.h';
my @curves = split( /\s+/, `sed -n -e '$sed_cmd' $config_h` );
+# Determine which curves support ECDSA by checking the dependencies of
+# ECDSA in check_config.h.
+my %curve_supports_ecdsa = ();
+{
+ local $/ = "";
+ local *CHECK_CONFIG;
+ open(CHECK_CONFIG, '<', 'include/mbedtls/check_config.h')
+ or die "open include/mbedtls/check_config.h: $!";
+ while (my $stanza = <CHECK_CONFIG>) {
+ if ($stanza =~ /\A#if defined\(MBEDTLS_ECDSA_C\)/) {
+ for my $curve ($stanza =~ /(?<=\()MBEDTLS_ECP_DP_\w+_ENABLED(?=\))/g) {
+ $curve_supports_ecdsa{$curve} = 1;
+ }
+ last;
+ }
+ }
+ close(CHECK_CONFIG);
+}
+
system( "cp $config_h $config_h.bak" ) and die;
sub abort {
system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
@@ -41,24 +77,66 @@
exit 1;
}
+# Disable all the curves. We'll then re-enable them one by one.
+for my $curve (@curves) {
+ system( "scripts/config.pl unset $curve" )
+ and abort "Failed to disable $curve\n";
+}
+# Depends on a specific curve. Also, ignore error if it wasn't enabled.
+system( "scripts/config.pl unset MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED" );
+system( "scripts/config.pl unset MBEDTLS_ECJPAKE_C" );
+
+# Test with only $curve enabled, for each $curve.
+for my $curve (@curves) {
+ system( "make clean" ) and die;
+
+ print "\n******************************************\n";
+ print "* Testing with only curve: $curve\n";
+ print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = "$curve";
+
+ system( "scripts/config.pl set $curve" )
+ and abort "Failed to enable $curve\n";
+
+ my $ecdsa = $curve_supports_ecdsa{$curve} ? "set" : "unset";
+ for my $dep (qw(MBEDTLS_ECDSA_C
+ MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+ MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED)) {
+ system( "scripts/config.pl $ecdsa $dep" )
+ and abort "Failed to $ecdsa $dep\n";
+ }
+
+ system( "CFLAGS='-Werror -Wall -Wextra' make" )
+ and abort "Failed to build: only $curve\n";
+ system( "make test" )
+ and abort "Failed test suite: only $curve\n";
+
+ system( "scripts/config.pl unset $curve" )
+ and abort "Failed to disable $curve\n";
+}
+
+system( "cp $config_h.bak $config_h" ) and die "$config_h not restored\n";
+
+# Test with $curve disabled but the others enabled, for each $curve.
for my $curve (@curves) {
system( "cp $config_h.bak $config_h" ) and die "$config_h not restored\n";
system( "make clean" ) and die;
# depends on a specific curve. Also, ignore error if it wasn't enabled
- system( "scripts/config.pl unset MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED" );
+ system( "scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED" );
print "\n******************************************\n";
print "* Testing without curve: $curve\n";
print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = "-$curve";
- system( "scripts/config.pl unset $curve" )
+ system( "scripts/config.py unset $curve" )
and abort "Failed to disable $curve\n";
- system( "CFLAGS='-Werror -Wall -Wextra' make lib" )
- and abort "Failed to build lib: $curve\n";
- system( "cd tests && make" ) and abort "Failed to build tests: $curve\n";
- system( "make test" ) and abort "Failed test suite: $curve\n";
+ system( "CFLAGS='-Werror -Wall -Wextra' make" )
+ and abort "Failed to build: all but $curve\n";
+ system( "make test" )
+ and abort "Failed test suite: all but $curve\n";
}
diff --git a/tests/scripts/depends-hashes.pl b/tests/scripts/depends-hashes.pl
index 29dcfb0..cd17066 100755
--- a/tests/scripts/depends-hashes.pl
+++ b/tests/scripts/depends-hashes.pl
@@ -1,8 +1,21 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# depends-hashes.pl
#
-# Copyright (c) 2017, ARM Limited, All Rights Reserved
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -29,7 +42,7 @@
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";
-my $config_h = 'include/mbedtls/config.h';
+my $config_h = 'include/mbedtls/mbedtls_config.h';
# as many SSL options depend on specific hashes,
# and SSL is not in the test suites anyways,
@@ -37,11 +50,17 @@
my $ssl_sed_cmd = 's/^#define \(MBEDTLS_SSL.*\)/\1/p';
my @ssl = split( /\s+/, `sed -n -e '$ssl_sed_cmd' $config_h` );
-# for md we want to catch MD5_C but not MD_C, hence the extra dot
-my $mdx_sed_cmd = 's/^#define \(MBEDTLS_MD..*_C\)/\1/p';
-my $sha_sed_cmd = 's/^#define \(MBEDTLS_SHA.*_C\)/\1/p';
-my @hashes = split( /\s+/,
- `sed -n -e '$mdx_sed_cmd' -e '$sha_sed_cmd' $config_h` );
+# Each element of this array holds list of configuration options that
+# should be tested together. Certain options depend on each other and
+# separating them would generate invalid configurations.
+my @hash_configs = (
+ ['unset MBEDTLS_MD5_C'],
+    ['unset MBEDTLS_SHA512_C', 'unset MBEDTLS_SHA384_C'],
+ ['unset MBEDTLS_SHA384_C'],
+ ['unset MBEDTLS_SHA256_C', 'unset MBEDTLS_SHA224_C'],
+ ['unset MBEDTLS_SHA1_C'],
+);
+
system( "cp $config_h $config_h.bak" ) and die;
sub abort {
system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
@@ -50,26 +69,31 @@
exit 1;
}
-for my $hash (@hashes) {
+for my $hash_config (@hash_configs) {
system( "cp $config_h.bak $config_h" ) and die "$config_h not restored\n";
system( "make clean" ) and die;
- print "\n******************************************\n";
- print "* Testing without hash: $hash\n";
- print "******************************************\n";
+ my $hash_config_string = join(', ', @$hash_config);
- system( "scripts/config.pl unset $hash" )
- and abort "Failed to disable $hash\n";
+ print "\n******************************************\n";
+ print "* Testing hash options: $hash_config_string\n";
+ print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = "-$hash_config_string";
+
+ for my $hash (@$hash_config) {
+ system( "scripts/config.py $hash" )
+ and abort "Failed to $hash\n";
+ }
for my $opt (@ssl) {
- system( "scripts/config.pl unset $opt" )
+ system( "scripts/config.py unset $opt" )
and abort "Failed to disable $opt\n";
}
system( "CFLAGS='-Werror -Wall -Wextra' make lib" )
- and abort "Failed to build lib: $hash\n";
- system( "cd tests && make" ) and abort "Failed to build tests: $hash\n";
- system( "make test" ) and abort "Failed test suite: $hash\n";
+ and abort "Failed to build lib: $hash_config_string\n";
+ system( "cd tests && make" ) and abort "Failed to build tests: $hash_config_string\n";
+ system( "make test" ) and abort "Failed test suite: $hash_config_string\n";
}
system( "mv $config_h.bak $config_h" ) and die "$config_h not restored\n";
diff --git a/tests/scripts/depends-pkalgs.pl b/tests/scripts/depends-pkalgs.pl
index 14c92b2..6eb7269 100755
--- a/tests/scripts/depends-pkalgs.pl
+++ b/tests/scripts/depends-pkalgs.pl
@@ -1,8 +1,21 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# depends-pkalgs.pl
#
-# Copyright (c) 2017, ARM Limited, All Rights Reserved
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -30,12 +43,13 @@
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";
-my $config_h = 'include/mbedtls/config.h';
+my $config_h = 'include/mbedtls/mbedtls_config.h';
# Some algorithms can't be disabled on their own as others depend on them, so
# we list those reverse-dependencies here to keep check_config.h happy.
my %algs = (
- 'MBEDTLS_ECDSA_C' => ['MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED'],
+ 'MBEDTLS_ECDSA_C' => ['MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED'],
'MBEDTLS_ECP_C' => ['MBEDTLS_ECDSA_C',
'MBEDTLS_ECDH_C',
'MBEDTLS_ECJPAKE_C',
@@ -53,6 +67,7 @@
'MBEDTLS_RSA_C' => ['MBEDTLS_X509_RSASSA_PSS_SUPPORT',
'MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED',
'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED',
'MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED',
'MBEDTLS_KEY_EXCHANGE_RSA_ENABLED'],
);
@@ -72,11 +87,12 @@
print "\n******************************************\n";
print "* Testing without alg: $alg\n";
print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = "-$alg";
- system( "scripts/config.pl unset $alg" )
+ system( "scripts/config.py unset $alg" )
and abort "Failed to disable $alg\n";
for my $opt (@$extras) {
- system( "scripts/config.pl unset $opt" )
+ system( "scripts/config.py unset $opt" )
and abort "Failed to disable $opt\n";
}
diff --git a/tests/scripts/docker_env.sh b/tests/scripts/docker_env.sh
new file mode 100755
index 0000000..be96c72
--- /dev/null
+++ b/tests/scripts/docker_env.sh
@@ -0,0 +1,98 @@
+#!/bin/bash -eu
+
+# docker_env.sh
+#
+# Purpose
+# -------
+#
+# This is a helper script to enable running tests under a Docker container,
+# thus making it easier to get set up as well as isolating test dependencies
+# (which include legacy/insecure configurations of openssl and gnutls).
+#
+# Notes for users
+# ---------------
+# This script expects a Linux x86_64 system with a recent version of Docker
+# installed and available for use, as well as http/https access. If a proxy
+# server must be used, invoke this script with the usual environment variables
+# (http_proxy and https_proxy) set appropriately. If an alternate Docker
+# registry is needed, specify MBEDTLS_DOCKER_REGISTRY to point at the
+# host name.
+#
+#
+# Running this script directly will check for Docker availability and set up
+# the Docker image.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# default values, can be overridden by the environment
+: ${MBEDTLS_DOCKER_GUEST:=bionic}
+
+
+DOCKER_IMAGE_TAG="armmbed/mbedtls-test:${MBEDTLS_DOCKER_GUEST}"
+
+# Make sure docker is available
+if ! which docker > /dev/null; then
+ echo "Docker is required but doesn't seem to be installed. See https://www.docker.com/ to get started"
+ exit 1
+fi
+
+# Figure out if we need to 'sudo docker'
+if groups | grep docker > /dev/null; then
+ DOCKER="docker"
+else
+ echo "Using sudo to invoke docker since you're not a member of the docker group..."
+ DOCKER="sudo docker"
+fi
+
+# Figure out the number of processors available
+if [ "$(uname)" == "Darwin" ]; then
+ NUM_PROC="$(sysctl -n hw.logicalcpu)"
+else
+ NUM_PROC="$(nproc)"
+fi
+
+# Build the Docker image
+echo "Getting docker image up to date (this may take a few minutes)..."
+${DOCKER} image build \
+ -t ${DOCKER_IMAGE_TAG} \
+ --cache-from=${DOCKER_IMAGE_TAG} \
+ --build-arg MAKEFLAGS_PARALLEL="-j ${NUM_PROC}" \
+ --network host \
+ ${http_proxy+--build-arg http_proxy=${http_proxy}} \
+ ${https_proxy+--build-arg https_proxy=${https_proxy}} \
+ ${MBEDTLS_DOCKER_REGISTRY+--build-arg MY_REGISTRY="${MBEDTLS_DOCKER_REGISTRY}/"} \
+ tests/docker/${MBEDTLS_DOCKER_GUEST}
+
+run_in_docker()
+{
+ ENV_ARGS=""
+ while [ "$1" == "-e" ]; do
+ ENV_ARGS="${ENV_ARGS} $1 $2"
+ shift 2
+ done
+
+ ${DOCKER} container run -it --rm \
+ --cap-add SYS_PTRACE \
+ --user "$(id -u):$(id -g)" \
+ --volume $PWD:$PWD \
+ --workdir $PWD \
+ -e MAKEFLAGS \
+ -e PYLINTHOME=/tmp/.pylintd \
+ ${ENV_ARGS} \
+ ${DOCKER_IMAGE_TAG} \
+ $@
+}
diff --git a/tests/scripts/doxygen.sh b/tests/scripts/doxygen.sh
index e7758c9..2c523ba 100755
--- a/tests/scripts/doxygen.sh
+++ b/tests/scripts/doxygen.sh
@@ -1,6 +1,21 @@
#!/bin/sh
# Make sure the doxygen documentation builds without warnings
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
# Abort on errors (and uninitiliased variables)
set -eu
diff --git a/tests/scripts/gen_ctr_drbg.pl b/tests/scripts/gen_ctr_drbg.pl
index ee13024..2345b9e 100755
--- a/tests/scripts/gen_ctr_drbg.pl
+++ b/tests/scripts/gen_ctr_drbg.pl
@@ -1,8 +1,23 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
# Based on NIST CTR_DRBG.rsp validation file
# Only uses AES-256-CTR cases that use a Derivation function
# and concats nonce and personalization for initialization.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use strict;
diff --git a/tests/scripts/gen_gcm_decrypt.pl b/tests/scripts/gen_gcm_decrypt.pl
index 6decac2..354e351 100755
--- a/tests/scripts/gen_gcm_decrypt.pl
+++ b/tests/scripts/gen_gcm_decrypt.pl
@@ -1,7 +1,22 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
# Based on NIST gcmDecryptxxx.rsp validation files
# Only first 3 of every set used for compile time saving
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use strict;
diff --git a/tests/scripts/gen_gcm_encrypt.pl b/tests/scripts/gen_gcm_encrypt.pl
index 8adbbce..101456f 100755
--- a/tests/scripts/gen_gcm_encrypt.pl
+++ b/tests/scripts/gen_gcm_encrypt.pl
@@ -1,7 +1,22 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
# Based on NIST gcmEncryptIntIVxxx.rsp validation files
# Only first 3 of every set used for compile time saving
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use strict;
diff --git a/tests/scripts/gen_pkcs1_v21_sign_verify.pl b/tests/scripts/gen_pkcs1_v21_sign_verify.pl
index 678e2f9..609e558 100755
--- a/tests/scripts/gen_pkcs1_v21_sign_verify.pl
+++ b/tests/scripts/gen_pkcs1_v21_sign_verify.pl
@@ -1,5 +1,19 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use strict;
diff --git a/tests/scripts/generate-afl-tests.sh b/tests/scripts/generate-afl-tests.sh
index cbc2f59..7c9f432 100755
--- a/tests/scripts/generate-afl-tests.sh
+++ b/tests/scripts/generate-afl-tests.sh
@@ -7,6 +7,21 @@
# Usage: generate-afl-tests.sh <test data file path>
# <test data file path> - should be the path to one of the test suite files
# such as 'test_suite_mpi.data'
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
# Abort on errors
set -e
diff --git a/tests/scripts/generate_code.pl b/tests/scripts/generate_code.pl
deleted file mode 100755
index e489a00..0000000
--- a/tests/scripts/generate_code.pl
+++ /dev/null
@@ -1,411 +0,0 @@
-#!/usr/bin/env perl
-
-# generate_code.pl
-#
-# This file is part of mbed TLS (https://tls.mbed.org)
-#
-# Copyright (c) 2009-2016, ARM Limited, All Rights Reserved
-#
-# Purpose
-#
-# Generates the test suite code given inputs of the test suite directory that
-# contain the test suites, and the test suite file names for the test code and
-# test data.
-#
-# Usage: generate_code.pl <suite dir> <code file> <data file> [main code file]
-#
-# Structure of files
-#
-# - main code file - 'main_test.function'
-# Template file that contains the main() function for the test suite,
-# test dispatch code as well as support functions. It contains the
-# following symbols which are substituted by this script during
-# processing:
-# TESTCASE_FILENAME
-# TESTCODE_FILENAME
-# SUITE_PRE_DEP
-# MAPPING_CODE
-# FUNCTION CODE
-# SUITE_POST_DEP
-# DEP_CHECK_CODE
-# DISPATCH_FUNCTION
-# !LINE_NO!
-#
-# - common helper code file - 'helpers.function'
-# Common helper functions
-#
-# - test suite code file - file name in the form 'test_suite_xxx.function'
-# Code file that contains the actual test cases. The file contains a
-# series of code sequences delimited by the following:
-# BEGIN_HEADER / END_HEADER - list of headers files
-# BEGIN_SUITE_HELPERS / END_SUITE_HELPERS - helper functions common to
-# the test suite
-# BEGIN_CASE / END_CASE - the test cases in the test suite. Each test
-# case contains at least one function that is used to create the
-# dispatch code.
-#
-# - test data file - file name in the form 'test_suite_xxxx.data'
-# The test case parameters to to be used in execution of the test. The
-# file name is used to replace the symbol 'TESTCASE_FILENAME' in the main
-# code file above.
-#
-# A test data file consists of a sequence of paragraphs separated by
-# a single empty line. Line breaks may be in Unix (LF) or Windows (CRLF)
-# format. Lines starting with the character '#' are ignored
-# (the parser behaves as if they were not present).
-#
-# Each paragraph describes one test case and must consist of: (1) one
-# line which is the test case name; (2) an optional line starting with
-# the 11-character prefix "depends_on:"; (3) a line containing the test
-# function to execute and its parameters.
-#
-# A depends_on: line consists of a list of compile-time options
-# separated by the character ':', with no whitespace. The test case
-# is executed only if this compilation option is enabled in config.h.
-#
-# The last line of each paragraph contains a test function name and
-# a list of parameters separated by the character ':'. Running the
-# test case calls this function with the specified parameters. Each
-# parameter may either be an integer written in decimal or hexadecimal,
-# or a string surrounded by double quotes which may not contain the
-# ':' character.
-#
-
-use strict;
-
-my $suite_dir = shift or die "Missing suite directory";
-my $suite_name = shift or die "Missing suite name";
-my $data_name = shift or die "Missing data name";
-my $test_main_file = do { my $arg = shift; defined($arg) ? $arg : $suite_dir."/main_test.function" };
-my $test_file = $data_name.".c";
-my $test_common_helper_file = $suite_dir."/helpers.function";
-my $test_case_file = $suite_dir."/".$suite_name.".function";
-my $test_case_data = $suite_dir."/".$data_name.".data";
-
-my $line_separator = $/;
-undef $/;
-
-
-#
-# Open and read in the input files
-#
-
-open(TEST_HELPERS, "$test_common_helper_file") or die "Opening test helpers
-'$test_common_helper_file': $!";
-my $test_common_helpers = <TEST_HELPERS>;
-close(TEST_HELPERS);
-
-open(TEST_MAIN, "$test_main_file") or die "Opening test main '$test_main_file': $!";
-my @test_main_lines = split/^/, <TEST_MAIN>;
-my $test_main;
-my $index = 2;
-for my $line (@test_main_lines) {
- $line =~ s/!LINE_NO!/$index/;
- $test_main = $test_main.$line;
- $index++;
-}
-close(TEST_MAIN);
-
-open(TEST_CASES, "$test_case_file") or die "Opening test cases '$test_case_file': $!";
-my @test_cases_lines = split/^/, <TEST_CASES>;
-my $test_cases;
-my $index = 2;
-for my $line (@test_cases_lines) {
- if ($line =~ /^\/\* BEGIN_SUITE_HELPERS .*\*\//)
- {
- $line = $line."#line $index \"$test_case_file\"\n";
- }
-
- if ($line =~ /^\/\* BEGIN_CASE .*\*\//)
- {
- $line = $line."#line $index \"$test_case_file\"\n";
- }
-
- $line =~ s/!LINE_NO!/$index/;
-
- $test_cases = $test_cases.$line;
- $index++;
-}
-
-close(TEST_CASES);
-
-open(TEST_DATA, "$test_case_data") or die "Opening test data '$test_case_data': $!";
-my $test_data = <TEST_DATA>;
-close(TEST_DATA);
-
-
-#
-# Find the headers, dependencies, and suites in the test cases file
-#
-
-my ( $suite_header ) = $test_cases =~ /\/\* BEGIN_HEADER \*\/\n(.*?)\n\/\* END_HEADER \*\//s;
-my ( $suite_defines ) = $test_cases =~ /\/\* BEGIN_DEPENDENCIES\n \* (.*?)\n \* END_DEPENDENCIES/s;
-my ( $suite_helpers ) = $test_cases =~ /\/\* BEGIN_SUITE_HELPERS \*\/\n(.*?)\n\/\* END_SUITE_HELPERS \*\//s;
-
-my $requirements;
-if ($suite_defines =~ /^depends_on:/)
-{
- ( $requirements ) = $suite_defines =~ /^depends_on:(.*)$/;
-}
-
-my @var_req_arr = split(/:/, $requirements);
-my $suite_pre_code;
-my $suite_post_code;
-my $dispatch_code;
-my $mapping_code;
-my %mapping_values;
-
-while (@var_req_arr)
-{
- my $req = shift @var_req_arr;
- $req =~ s/(!?)(.*)/$1defined($2)/;
-
- $suite_pre_code .= "#if $req\n";
- $suite_post_code .= "#endif /* $req */\n";
-}
-
-$/ = $line_separator;
-
-open(TEST_FILE, ">$test_file") or die "Opening destination file '$test_file': $!";
-print TEST_FILE << "END";
-/*
- * *** THIS FILE HAS BEEN MACHINE GENERATED ***
- *
- * This file has been machine generated using the script: $0
- *
- * Test file : $test_file
- *
- * The following files were used to create this file.
- *
- * Main code file : $test_main_file
- * Helper file : $test_common_helper_file
- * Test suite file : $test_case_file
- * Test suite data : $test_case_data
- *
- *
- * This file is part of mbed TLS (https://tls.mbed.org)
- */
-
-#if !defined(MBEDTLS_CONFIG_FILE)
-#include <mbedtls/config.h>
-#else
-#include MBEDTLS_CONFIG_FILE
-#endif
-
-
-/*----------------------------------------------------------------------------*/
-/* Common helper code */
-
-$test_common_helpers
-
-
-/*----------------------------------------------------------------------------*/
-/* Test Suite Code */
-
-$suite_pre_code
-$suite_header
-$suite_helpers
-$suite_post_code
-
-END
-
-$test_main =~ s/SUITE_PRE_DEP/$suite_pre_code/;
-$test_main =~ s/SUITE_POST_DEP/$suite_post_code/;
-
-while($test_cases =~ /\/\* BEGIN_CASE *([\w:]*) \*\/\n(.*?)\n\/\* END_CASE \*\//msg)
-{
- my $function_deps = $1;
- my $function_decl = $2;
-
- # Sanity checks of function
- if ($function_decl !~ /^#line\s*.*\nvoid /)
- {
- die "Test function does not have 'void' as return type.\n" .
- "Function declaration:\n" .
- $function_decl;
- }
- if ($function_decl !~ /^(#line\s*.*)\nvoid (\w+)\(\s*(.*?)\s*\)\s*{(.*)}/ms)
- {
- die "Function declaration not in expected format\n";
- }
- my $line_directive = $1;
- my $function_name = $2;
- my $function_params = $3;
- my $function_pre_code;
- my $function_post_code;
- my $param_defs;
- my $param_checks;
- my @dispatch_params;
- my @var_def_arr = split(/,\s*/, $function_params);
- my $i = 1;
- my $mapping_regex = "".$function_name;
- my $mapping_count = 0;
-
- $function_decl =~ s/(^#line\s*.*)\nvoid /$1\nvoid test_suite_/;
-
- # Add exit label if not present
- if ($function_decl !~ /^exit:$/m)
- {
- $function_decl =~ s/}\s*$/\nexit:\n return;\n}/;
- }
-
- if ($function_deps =~ /^depends_on:/)
- {
- ( $function_deps ) = $function_deps =~ /^depends_on:(.*)$/;
- }
-
- foreach my $req (split(/:/, $function_deps))
- {
- $function_pre_code .= "#ifdef $req\n";
- $function_post_code .= "#endif /* $req */\n";
- }
-
- foreach my $def (@var_def_arr)
- {
- # Handle the different parameter types
- if( substr($def, 0, 4) eq "int " )
- {
- $param_defs .= " int param$i;\n";
- $param_checks .= " if( verify_int( params[$i], ¶m$i ) != 0 ) return( DISPATCH_INVALID_TEST_DATA );\n";
- push @dispatch_params, "param$i";
-
- $mapping_regex .= ":([\\d\\w |\\+\\-\\(\\)]+)";
- $mapping_count++;
- }
- elsif( substr($def, 0, 6) eq "char *" )
- {
- $param_defs .= " char *param$i = params[$i];\n";
- $param_checks .= " if( verify_string( ¶m$i ) != 0 ) return( DISPATCH_INVALID_TEST_DATA );\n";
- push @dispatch_params, "param$i";
- $mapping_regex .= ":(?:\\\\.|[^:\n])+";
- }
- else
- {
- die "Parameter declaration not of supported type (int, char *)\n";
- }
- $i++;
-
- }
-
- # Find non-integer values we should map for this function
- if( $mapping_count)
- {
- my @res = $test_data =~ /^$mapping_regex/msg;
- foreach my $value (@res)
- {
- next unless ($value !~ /^\d+$/);
- if ( $mapping_values{$value} ) {
- ${ $mapping_values{$value} }{$function_pre_code} = 1;
- } else {
- $mapping_values{$value} = { $function_pre_code => 1 };
- }
- }
- }
-
- my $call_params = join ", ", @dispatch_params;
- my $param_count = @var_def_arr + 1;
- $dispatch_code .= << "END";
-if( strcmp( params[0], "$function_name" ) == 0 )
-{
-$function_pre_code
-$param_defs
- if( cnt != $param_count )
- {
- mbedtls_fprintf( stderr, "\\nIncorrect argument count (%d != %d)\\n", cnt, $param_count );
- return( DISPATCH_INVALID_TEST_DATA );
- }
-
-$param_checks
- test_suite_$function_name( $call_params );
- return ( DISPATCH_TEST_SUCCESS );
-$function_post_code
- return ( DISPATCH_UNSUPPORTED_SUITE );
-}
-else
-END
-
- my $function_code = $function_pre_code . $function_decl . "\n" .
- $function_post_code;
- $test_main =~ s/FUNCTION_CODE/$function_code\nFUNCTION_CODE/;
-}
-
-# Find specific case dependencies that we should be able to check
-# and make check code
-my $dep_check_code;
-
-my @res = $test_data =~ /^depends_on:([!:\w]+)/msg;
-my %case_deps;
-foreach my $deps (@res)
-{
- foreach my $dep (split(/:/, $deps))
- {
- $case_deps{$dep} = 1;
- }
-}
-while( my ($key, $value) = each(%case_deps) )
-{
- if( substr($key, 0, 1) eq "!" )
- {
- my $key = substr($key, 1);
- $dep_check_code .= << "END";
- if( strcmp( str, "!$key" ) == 0 )
- {
-#if !defined($key)
- return( DEPENDENCY_SUPPORTED );
-#else
- return( DEPENDENCY_NOT_SUPPORTED );
-#endif
- }
-END
- }
- else
- {
- $dep_check_code .= << "END";
- if( strcmp( str, "$key" ) == 0 )
- {
-#if defined($key)
- return( DEPENDENCY_SUPPORTED );
-#else
- return( DEPENDENCY_NOT_SUPPORTED );
-#endif
- }
-END
- }
-}
-
-# Make mapping code
-while( my ($key, $value) = each(%mapping_values) )
-{
- my $key_mapping_code = << "END";
- if( strcmp( str, "$key" ) == 0 )
- {
- *value = ( $key );
- return( KEY_VALUE_MAPPING_FOUND );
- }
-END
-
- # handle depenencies, unless used at least one without depends
- if ($value->{""}) {
- $mapping_code .= $key_mapping_code;
- next;
- }
- for my $ifdef ( keys %$value ) {
- (my $endif = $ifdef) =~ s!ifdef!endif //!g;
- $mapping_code .= $ifdef . $key_mapping_code . $endif;
- }
-}
-
-$dispatch_code =~ s/^(.+)/ $1/mg;
-
-$test_main =~ s/TESTCASE_FILENAME/$test_case_data/g;
-$test_main =~ s/TESTCODE_FILENAME/$test_case_file/g;
-$test_main =~ s/FUNCTION_CODE//;
-$test_main =~ s/DEP_CHECK_CODE/$dep_check_code/;
-$test_main =~ s/DISPATCH_FUNCTION/$dispatch_code/;
-$test_main =~ s/MAPPING_CODE/$mapping_code/;
-
-print TEST_FILE << "END";
-$test_main
-END
-
-close(TEST_FILE);
diff --git a/tests/scripts/generate_psa_tests.py b/tests/scripts/generate_psa_tests.py
new file mode 100755
index 0000000..c788ce6
--- /dev/null
+++ b/tests/scripts/generate_psa_tests.py
@@ -0,0 +1,693 @@
+#!/usr/bin/env python3
+"""Generate test data for PSA cryptographic mechanisms.
+
+With no arguments, generate all test data. With non-option arguments,
+generate only the specified files.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import posixpath
+import re
+import sys
+from typing import Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional, TypeVar
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import build_tree
+from mbedtls_dev import crypto_knowledge
+from mbedtls_dev import macro_collector
+from mbedtls_dev import psa_storage
+from mbedtls_dev import test_case
+
+T = TypeVar('T') #pylint: disable=invalid-name
+
+
+def psa_want_symbol(name: str) -> str:
+ """Return the PSA_WANT_xxx symbol associated with a PSA crypto feature."""
+ if name.startswith('PSA_'):
+ return name[:4] + 'WANT_' + name[4:]
+ else:
+ raise ValueError('Unable to determine the PSA_WANT_ symbol for ' + name)
+
+def finish_family_dependency(dep: str, bits: int) -> str:
+ """Finish dep if it's a family dependency symbol prefix.
+
+ A family dependency symbol prefix is a PSA_WANT_ symbol that needs to be
+ qualified by the key size. If dep is such a symbol, finish it by adjusting
+ the prefix and appending the key size. Other symbols are left unchanged.
+ """
+ return re.sub(r'_FAMILY_(.*)', r'_\1_' + str(bits), dep)
+
+def finish_family_dependencies(dependencies: List[str], bits: int) -> List[str]:
+ """Finish any family dependency symbol prefixes.
+
+ Apply `finish_family_dependency` to each element of `dependencies`.
+ """
+ return [finish_family_dependency(dep, bits) for dep in dependencies]
+
+SYMBOLS_WITHOUT_DEPENDENCY = frozenset([
+ 'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', # modifier, only in policies
+ 'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # modifier
+ 'PSA_ALG_ANY_HASH', # only in policies
+ 'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', # modifier, only in policies
+ 'PSA_ALG_KEY_AGREEMENT', # chaining
+ 'PSA_ALG_TRUNCATED_MAC', # modifier
+])
+def automatic_dependencies(*expressions: str) -> List[str]:
+ """Infer dependencies of a test case by looking for PSA_xxx symbols.
+
+ The arguments are strings which should be C expressions. Do not use
+ string literals or comments as this function is not smart enough to
+ skip them.
+ """
+ used = set()
+ for expr in expressions:
+ used.update(re.findall(r'PSA_(?:ALG|ECC_FAMILY|KEY_TYPE)_\w+', expr))
+ used.difference_update(SYMBOLS_WITHOUT_DEPENDENCY)
+ return sorted(psa_want_symbol(name) for name in used)
+
+# A temporary hack: at the time of writing, not all dependency symbols
+# are implemented yet. Skip test cases for which the dependency symbols are
+# not available. Once all dependency symbols are available, this hack must
+# be removed so that a bug in the dependency symbols properly leads to a test
+# failure.
+def read_implemented_dependencies(filename: str) -> FrozenSet[str]:
+ return frozenset(symbol
+ for line in open(filename)
+ for symbol in re.findall(r'\bPSA_WANT_\w+\b', line))
+_implemented_dependencies = None #type: Optional[FrozenSet[str]] #pylint: disable=invalid-name
+def hack_dependencies_not_implemented(dependencies: List[str]) -> None:
+ global _implemented_dependencies #pylint: disable=global-statement,invalid-name
+ if _implemented_dependencies is None:
+ _implemented_dependencies = \
+ read_implemented_dependencies('include/psa/crypto_config.h')
+ if not all(dep.lstrip('!') in _implemented_dependencies
+ for dep in dependencies):
+ dependencies.append('DEPENDENCY_NOT_IMPLEMENTED_YET')
+
+
+class Information:
+ """Gather information about PSA constructors."""
+
+ def __init__(self) -> None:
+ self.constructors = self.read_psa_interface()
+
+ @staticmethod
+ def remove_unwanted_macros(
+ constructors: macro_collector.PSAMacroEnumerator
+ ) -> None:
+ # Mbed TLS doesn't support finite-field DH yet and will not support
+ # finite-field DSA. Don't attempt to generate any related test case.
+ constructors.key_types.discard('PSA_KEY_TYPE_DH_KEY_PAIR')
+ constructors.key_types.discard('PSA_KEY_TYPE_DH_PUBLIC_KEY')
+ constructors.key_types.discard('PSA_KEY_TYPE_DSA_KEY_PAIR')
+ constructors.key_types.discard('PSA_KEY_TYPE_DSA_PUBLIC_KEY')
+
+ def read_psa_interface(self) -> macro_collector.PSAMacroEnumerator:
+ """Return the list of known key types, algorithms, etc."""
+ constructors = macro_collector.InputsForTest()
+ header_file_names = ['include/psa/crypto_values.h',
+ 'include/psa/crypto_extra.h']
+ test_suites = ['tests/suites/test_suite_psa_crypto_metadata.data']
+ for header_file_name in header_file_names:
+ constructors.parse_header(header_file_name)
+ for test_cases in test_suites:
+ constructors.parse_test_cases(test_cases)
+ self.remove_unwanted_macros(constructors)
+ constructors.gather_arguments()
+ return constructors
+
+
+def test_case_for_key_type_not_supported(
+ verb: str, key_type: str, bits: int,
+ dependencies: List[str],
+ *args: str,
+ param_descr: str = ''
+) -> test_case.TestCase:
+ """Return one test case exercising a key creation method
+ for an unsupported key type or size.
+ """
+ hack_dependencies_not_implemented(dependencies)
+ tc = test_case.TestCase()
+ short_key_type = re.sub(r'PSA_(KEY_TYPE|ECC_FAMILY)_', r'', key_type)
+ adverb = 'not' if dependencies else 'never'
+ if param_descr:
+ adverb = param_descr + ' ' + adverb
+ tc.set_description('PSA {} {} {}-bit {} supported'
+ .format(verb, short_key_type, bits, adverb))
+ tc.set_dependencies(dependencies)
+ tc.set_function(verb + '_not_supported')
+ tc.set_arguments([key_type] + list(args))
+ return tc
+
+class NotSupported:
+ """Generate test cases for when something is not supported."""
+
+ def __init__(self, info: Information) -> None:
+ self.constructors = info.constructors
+
+ ALWAYS_SUPPORTED = frozenset([
+ 'PSA_KEY_TYPE_DERIVE',
+ 'PSA_KEY_TYPE_RAW_DATA',
+ ])
+ def test_cases_for_key_type_not_supported(
+ self,
+ kt: crypto_knowledge.KeyType,
+ param: Optional[int] = None,
+ param_descr: str = '',
+ ) -> Iterator[test_case.TestCase]:
+ """Return test cases exercising key creation when the given type is unsupported.
+
+ If param is present and not None, emit test cases conditioned on this
+ parameter not being supported. If it is absent or None, emit test cases
+ conditioned on the base type not being supported.
+ """
+ if kt.name in self.ALWAYS_SUPPORTED:
+ # Don't generate test cases for key types that are always supported.
+ # They would be skipped in all configurations, which is noise.
+ return
+ import_dependencies = [('!' if param is None else '') +
+ psa_want_symbol(kt.name)]
+ if kt.params is not None:
+ import_dependencies += [('!' if param == i else '') +
+ psa_want_symbol(sym)
+ for i, sym in enumerate(kt.params)]
+ if kt.name.endswith('_PUBLIC_KEY'):
+ generate_dependencies = []
+ else:
+ generate_dependencies = import_dependencies
+ for bits in kt.sizes_to_test():
+ yield test_case_for_key_type_not_supported(
+ 'import', kt.expression, bits,
+ finish_family_dependencies(import_dependencies, bits),
+ test_case.hex_string(kt.key_material(bits)),
+ param_descr=param_descr,
+ )
+ if not generate_dependencies and param is not None:
+ # If generation is impossible for this key type, rather than
+ # supported or not depending on implementation capabilities,
+ # only generate the test case once.
+ continue
+ yield test_case_for_key_type_not_supported(
+ 'generate', kt.expression, bits,
+ finish_family_dependencies(generate_dependencies, bits),
+ str(bits),
+ param_descr=param_descr,
+ )
+ # To be added: derive
+
+ ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
+ 'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
+
+ def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
+ """Generate test cases that exercise the creation of keys of unsupported types."""
+ for key_type in sorted(self.constructors.key_types):
+ if key_type in self.ECC_KEY_TYPES:
+ continue
+ kt = crypto_knowledge.KeyType(key_type)
+ yield from self.test_cases_for_key_type_not_supported(kt)
+ for curve_family in sorted(self.constructors.ecc_curves):
+ for constr in self.ECC_KEY_TYPES:
+ kt = crypto_knowledge.KeyType(constr, [curve_family])
+ yield from self.test_cases_for_key_type_not_supported(
+ kt, param_descr='type')
+ yield from self.test_cases_for_key_type_not_supported(
+ kt, 0, param_descr='curve')
+
+
+class StorageKey(psa_storage.Key):
+ """Representation of a key for storage format testing."""
+
+ IMPLICIT_USAGE_FLAGS = {
+ 'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
+ 'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
+ } #type: Dict[str, str]
+ """Mapping of usage flags to the flags that they imply."""
+
+ def __init__(
+ self,
+ usage: str,
+ without_implicit_usage: Optional[bool] = False,
+ **kwargs
+ ) -> None:
+ """Prepare to generate a key.
+
+ * `usage` : The usage flags used for the key.
+        * `without_implicit_usage`: Flag to decide whether to apply the usage extension
+ """
+ super().__init__(usage=usage, **kwargs)
+
+ if not without_implicit_usage:
+ for flag, implicit in self.IMPLICIT_USAGE_FLAGS.items():
+ if self.usage.value() & psa_storage.Expr(flag).value() and \
+ self.usage.value() & psa_storage.Expr(implicit).value() == 0:
+ self.usage = psa_storage.Expr(self.usage.string + ' | ' + implicit)
+
+class StorageTestData(StorageKey):
+ """Representation of test case data for storage format testing."""
+
+ def __init__(
+ self,
+ description: str,
+ expected_usage: Optional[str] = None,
+ **kwargs
+ ) -> None:
+ """Prepare to generate test data
+
+        * `description` : used for the test case names
+ * `expected_usage`: the usage flags generated as the expected usage flags
+                         in the test cases. Can differ from the usage flags
+ stored in the keys because of the usage flags extension.
+ """
+ super().__init__(**kwargs)
+ self.description = description #type: str
+ self.expected_usage = expected_usage if expected_usage else self.usage.string #type: str
+
+class StorageFormat:
+    """Storage format stability test cases."""
+
+    def __init__(self, info: Information, version: int, forward: bool) -> None:
+        """Prepare to generate test cases for storage format stability.
+
+        * `info`: information about the API. See the `Information` class.
+        * `version`: the storage format version to generate test cases for.
+        * `forward`: if true, generate forward compatibility test cases which
+          save a key and check that its representation is as intended. Otherwise
+          generate backward compatibility test cases which inject a key
+          representation and check that it can be read and used.
+        """
+        self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
+        self.version = version #type: int
+        self.forward = forward #type: bool
+
+    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
+        """Construct a storage format test case for the given key.
+
+        If ``forward`` is true, generate a forward compatibility test case:
+        create a key and validate that it has the expected representation.
+        Otherwise generate a backward compatibility test case: inject the
+        key representation into storage and validate that it can be read
+        correctly.
+        """
+        verb = 'save' if self.forward else 'read'
+        tc = test_case.TestCase()
+        tc.set_description('PSA storage {}: {}'.format(verb, key.description))
+        dependencies = automatic_dependencies(
+            key.lifetime.string, key.type.string,
+            key.expected_usage, key.alg.string, key.alg2.string,
+        )
+        dependencies = finish_family_dependencies(dependencies, key.bits)
+        tc.set_dependencies(dependencies)
+        tc.set_function('key_storage_' + verb)
+        if self.forward:
+            extra_arguments = []
+        else:
+            flags = []
+            # Some test keys have the RAW_DATA type and attributes that don't
+            # necessarily make sense. We do this to validate numerical
+            # encodings of the attributes.
+            # Raw data keys have no useful exercise anyway so there is no
+            # loss of test coverage.
+            if key.type.string != 'PSA_KEY_TYPE_RAW_DATA':
+                flags.append('TEST_FLAG_EXERCISE')
+            if 'READ_ONLY' in key.lifetime.string:
+                flags.append('TEST_FLAG_READ_ONLY')
+            extra_arguments = [' | '.join(flags) if flags else '0']
+        tc.set_arguments([key.lifetime.string,
+                          key.type.string, str(key.bits),
+                          key.expected_usage, key.alg.string, key.alg2.string,
+                          '"' + key.material.hex() + '"',
+                          '"' + key.hex() + '"',
+                          *extra_arguments])
+        return tc
+
+    def key_for_lifetime(
+            self,
+            lifetime: str,
+    ) -> StorageTestData:
+        """Construct a test key for the given lifetime."""
+        # Shorten the lifetime expression for use in the test description.
+        short = lifetime
+        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
+                       r'', short)
+        short = re.sub(r'PSA_KEY_[A-Z]+_', r'', short)
+        description = 'lifetime: ' + short
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=lifetime,
+                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                              usage='PSA_KEY_USAGE_EXPORT', alg=0, alg2=0,
+                              material=b'L',
+                              description=description)
+        return key
+
+    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering lifetimes."""
+        lifetimes = sorted(self.constructors.lifetimes)
+        expressions = self.constructors.generate_expressions(lifetimes)
+        for lifetime in expressions:
+            # Don't attempt to create or load a volatile key in storage
+            if 'VOLATILE' in lifetime:
+                continue
+            # Don't attempt to create a read-only key in storage,
+            # but do attempt to load one.
+            if 'READ_ONLY' in lifetime and self.forward:
+                continue
+            yield self.key_for_lifetime(lifetime)
+
+    def keys_for_usage_flags(
+            self,
+            usage_flags: List[str],
+            short: Optional[str] = None,
+            test_implicit_usage: Optional[bool] = False
+    ) -> Iterator[StorageTestData]:
+        """Generate test keys for the given combination of key usage flags."""
+        usage = ' | '.join(usage_flags) if usage_flags else '0'
+        if short is None:
+            short = re.sub(r'\bPSA_KEY_USAGE_', r'', usage)
+        extra_desc = ' with implication' if test_implicit_usage else ''
+        description = 'usage' + extra_desc + ': ' + short
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               expected_usage=usage,
+                               usage=usage, alg=0, alg2=0,
+                               material=b'K',
+                               description=description)
+        yield key1
+
+        if test_implicit_usage:
+            # Also generate a key stored without the implicit usage extension.
+            description = 'usage without implication' + ': ' + short
+            key2 = StorageTestData(version=self.version,
+                                   id=1, lifetime=0x00000001,
+                                   type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                                   without_implicit_usage=True,
+                                   usage=usage, alg=0, alg2=0,
+                                   material=b'K',
+                                   description=description)
+            yield key2
+
+    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield from self.keys_for_usage_flags(['0'], **kwargs)
+        for usage_flag in known_flags:
+            yield from self.keys_for_usage_flags([usage_flag], **kwargs)
+        # Also test pairs of flags (each flag with its cyclic successor).
+        for flag1, flag2 in zip(known_flags,
+                                known_flags[1:] + [known_flags[0]]):
+            yield from self.keys_for_usage_flags([flag1, flag2], **kwargs)
+
+    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate a test key combining all known usage flags."""
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield from self.keys_for_usage_flags(known_flags, short='all known')
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        yield from self.generate_keys_for_usage_flags()
+        yield from self.generate_key_for_all_usage_flags()
+
+    def keys_for_type(
+            self,
+            key_type: str,
+            params: Optional[Iterable[str]] = None
+    ) -> Iterator[StorageTestData]:
+        """Generate test keys for the given key type.
+
+        For key types that depend on a parameter (e.g. elliptic curve family),
+        `params` is the parameter list to pass to the constructor. Only a
+        single parameter is supported.
+        """
+        kt = crypto_knowledge.KeyType(key_type, params)
+        for bits in kt.sizes_to_test():
+            usage_flags = 'PSA_KEY_USAGE_EXPORT'
+            alg = 0
+            alg2 = 0
+            key_material = kt.key_material(bits)
+            short_expression = re.sub(r'\bPSA_(?:KEY_TYPE|ECC_FAMILY)_',
+                                      r'',
+                                      kt.expression)
+            description = 'type: {} {}-bit'.format(short_expression, bits)
+            key = StorageTestData(version=self.version,
+                                  id=1, lifetime=0x00000001,
+                                  type=kt.expression, bits=bits,
+                                  usage=usage_flags, alg=alg, alg2=alg2,
+                                  material=key_material,
+                                  description=description)
+            yield key
+
+    def all_keys_for_types(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering key types and their representations."""
+        key_types = sorted(self.constructors.key_types)
+        for key_type in self.constructors.generate_expressions(key_types):
+            yield from self.keys_for_type(key_type)
+
+    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
+        """Generate test keys for the specified algorithm."""
+        # For now, we don't have information on the compatibility of key
+        # types and algorithms. So we just test the encoding of algorithms,
+        # and not that operations can be performed with them.
+        descr = re.sub(r'PSA_ALG_', r'', alg)
+        descr = re.sub(r',', r', ', re.sub(r' +', r'', descr))
+        usage = 'PSA_KEY_USAGE_EXPORT'
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=alg, alg2=0,
+                               material=b'K',
+                               description='alg: ' + descr)
+        yield key1
+        key2 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=0, alg2=alg,
+                               material=b'L',
+                               description='alg2: ' + descr)
+        yield key2
+
+    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering algorithm encodings."""
+        algorithms = sorted(self.constructors.algorithms)
+        for alg in self.constructors.generate_expressions(algorithms):
+            yield from self.keys_for_algorithm(alg)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        """Generate all keys for the test cases."""
+        yield from self.all_keys_for_lifetimes()
+        yield from self.all_keys_for_usage_flags()
+        yield from self.all_keys_for_types()
+        yield from self.all_keys_for_algorithms()
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Generate all storage format test cases."""
+        # First build a list of all keys, then construct all the corresponding
+        # test cases. This allows all required information to be obtained in
+        # one go, which is a significant performance gain as the information
+        # includes numerical values obtained by compiling a C program.
+        all_keys = list(self.generate_all_keys())
+        for key in all_keys:
+            if key.location_value() != 0:
+                # Skip keys with a non-default location, because they
+                # require a driver and we currently have no mechanism to
+                # determine whether a driver is available.
+                continue
+            yield self.make_test_case(key)
+
+class StorageFormatForward(StorageFormat):
+    """Storage format stability test cases for forward compatibility."""
+
+    def __init__(self, info: Information, version: int) -> None:
+        """Prepare to generate forward compatibility (save-and-check) test cases."""
+        super().__init__(info, version, True)
+
+class StorageFormatV0(StorageFormat):
+    """Storage format stability test cases for version 0 compatibility."""
+
+    def __init__(self, info: Information) -> None:
+        """Prepare to generate version 0 backward compatibility (read) test cases."""
+        super().__init__(info, 0, False)
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        yield from self.generate_keys_for_usage_flags(test_implicit_usage=True)
+        yield from self.generate_key_for_all_usage_flags()
+
+    def keys_for_implicit_usage(
+            self,
+            implyer_usage: str,
+            alg: str,
+            key_type: crypto_knowledge.KeyType
+    ) -> StorageTestData:
+        # pylint: disable=too-many-locals
+        """Generate a test key for the specified implicit usage flag,
+        algorithm and key type combination.
+        """
+        bits = key_type.sizes_to_test()[0]
+        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
+        usage_flags = 'PSA_KEY_USAGE_EXPORT'
+        material_usage_flags = usage_flags + ' | ' + implyer_usage
+        expected_usage_flags = material_usage_flags + ' | ' + implicit_usage
+        alg2 = 0
+        key_material = key_type.key_material(bits)
+        usage_expression = re.sub(r'PSA_KEY_USAGE_', r'', implyer_usage)
+        alg_expression = re.sub(r'PSA_ALG_', r'', alg)
+        alg_expression = re.sub(r',', r', ', re.sub(r' +', r'', alg_expression))
+        key_type_expression = re.sub(r'\bPSA_(?:KEY_TYPE|ECC_FAMILY)_',
+                                     r'',
+                                     key_type.expression)
+        description = 'implied by {}: {} {} {}-bit'.format(
+            usage_expression, alg_expression, key_type_expression, bits)
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=0x00000001,
+                              type=key_type.expression, bits=bits,
+                              usage=material_usage_flags,
+                              expected_usage=expected_usage_flags,
+                              without_implicit_usage=True,
+                              alg=alg, alg2=alg2,
+                              material=key_material,
+                              description=description)
+        return key
+
+    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
+        # pylint: disable=too-many-locals
+        """Match possible key types for sign algorithms."""
+        # To create a valid combination both the algorithms and key types
+        # must be filtered. Pair them with keywords created from their names.
+        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
+        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
+        keyword_translation = {
+            'ECDSA': 'ECC',
+            'ED[0-9]*.*' : 'EDWARDS'
+        }
+        exclusive_keywords = {
+            'EDWARDS': 'ECC'
+        }
+        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
+        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
+        alg_with_keys = {} #type: Dict[str, List[str]]
+        translation_table = str.maketrans('(', '_', ')')
+        for alg in algorithms:
+            # Generate keywords from the name of the algorithm
+            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
+            # Translate keywords for better matching with the key types
+            for keyword in alg_keywords.copy():
+                for pattern, replace in keyword_translation.items():
+                    if re.match(pattern, keyword):
+                        alg_keywords.remove(keyword)
+                        alg_keywords.add(replace)
+            # Filter out incompatible algorithms
+            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
+                continue
+
+            for key_type in key_types:
+                # Generate keywords from the name of the key type
+                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])
+
+                # Remove ambiguous keywords
+                for keyword1, keyword2 in exclusive_keywords.items():
+                    if keyword1 in key_type_keywords:
+                        key_type_keywords.remove(keyword2)
+
+                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
+                   not key_type_keywords.isdisjoint(alg_keywords):
+                    if alg in alg_with_keys:
+                        alg_with_keys[alg].append(key_type)
+                    else:
+                        alg_with_keys[alg] = [key_type]
+        return alg_with_keys
+
+    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
+        """Generate test keys for usage flag extensions."""
+        # Generate a key type and algorithm pair for each extendable usage
+        # flag to generate a valid key for exercising. The key is generated
+        # without usage extension to check the extension compatibility.
+        alg_with_keys = self.gather_key_types_for_sign_alg()
+
+        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
+            for alg in sorted(alg_with_keys):
+                for key_type in sorted(alg_with_keys[alg]):
+                    # The key types must be filtered to fit the specific usage flag.
+                    kt = crypto_knowledge.KeyType(key_type)
+                    if kt.is_valid_for_signature(usage):
+                        yield self.keys_for_implicit_usage(usage, alg, kt)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        """Generate all keys, including the implicit-usage test keys."""
+        yield from super().generate_all_keys()
+        yield from self.all_keys_for_implicit_usage()
+
+class TestGenerator:
+    """Generate test data."""
+
+    def __init__(self, options) -> None:
+        self.test_suite_directory = self.get_option(options, 'directory',
+                                                    'tests/suites')
+        self.info = Information()
+
+    @staticmethod
+    def get_option(options, name: str, default: T) -> T:
+        """Return the value of command-line option `name`, or `default` if unset."""
+        value = getattr(options, name, None)
+        return default if value is None else value
+
+    def filename_for(self, basename: str) -> str:
+        """The location of the data file with the specified base name."""
+        return posixpath.join(self.test_suite_directory, basename + '.data')
+
+    def write_test_data_file(self, basename: str,
+                             test_cases: Iterable[test_case.TestCase]) -> None:
+        """Write the test cases to a .data file.
+
+        The output file is ``basename + '.data'`` in the test suite directory.
+        """
+        filename = self.filename_for(basename)
+        test_case.write_data_file(filename, test_cases)
+
+    # Maps each output file base name to a function building its test cases.
+    TARGETS = {
+        'test_suite_psa_crypto_not_supported.generated':
+        lambda info: NotSupported(info).test_cases_for_not_supported(),
+        'test_suite_psa_crypto_storage_format.current':
+        lambda info: StorageFormatForward(info, 0).all_test_cases(),
+        'test_suite_psa_crypto_storage_format.v0':
+        lambda info: StorageFormatV0(info).all_test_cases(),
+    } #type: Dict[str, Callable[[Information], Iterable[test_case.TestCase]]]
+
+    def generate_target(self, name: str) -> None:
+        """Generate the data file for the given target from `TARGETS`."""
+        test_cases = self.TARGETS[name](self.info)
+        self.write_test_data_file(name, test_cases)
+
+def main(args):
+    """Command line entry point."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument('--list', action='store_true',
+                        help='List available targets and exit')
+    parser.add_argument('targets', nargs='*', metavar='TARGET',
+                        help='Target file to generate (default: all; "-": none)')
+    options = parser.parse_args(args)
+    build_tree.chdir_to_root()
+    generator = TestGenerator(options)
+    if options.list:
+        # With --list, just print the output file names; generate nothing.
+        for name in sorted(generator.TARGETS):
+            print(generator.filename_for(name))
+        return
+    if options.targets:
+        # Allow "-" as a special case so you can run
+        # ``generate_psa_tests.py - $targets`` and it works uniformly whether
+        # ``$targets`` is empty or not.
+        options.targets = [os.path.basename(re.sub(r'\.data\Z', r'', target))
+                           for target in options.targets
+                           if target != '-']
+    else:
+        options.targets = sorted(generator.TARGETS)
+    for target in options.targets:
+        generator.generate_target(target)
+
+if __name__ == '__main__':  # run only when invoked as a script, not on import
+    main(sys.argv[1:])
diff --git a/tests/scripts/generate_test_code.py b/tests/scripts/generate_test_code.py
new file mode 100755
index 0000000..7382fb6
--- /dev/null
+++ b/tests/scripts/generate_test_code.py
@@ -0,0 +1,1149 @@
+#!/usr/bin/env python3
+# Test suites code generator.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This script is a key part of Mbed TLS test suites framework. For
+understanding the script it is important to understand the
+framework. This doc string contains a summary of the framework
+and explains the function of this script.
+
+Mbed TLS test suites:
+=====================
+Scope:
+------
+The test suites focus on unit testing the crypto primitives and also
+include x509 parser tests. Tests can be added to test any Mbed TLS
+module. However, the framework is not capable of testing SSL
+protocol, since that requires full stack execution and that is best
+tested as part of the system test.
+
+Test case definition:
+---------------------
+Tests are defined in a test_suite_<module>[.<optional sub module>].data
+file. A test definition contains:
+ test name
+ optional build macro dependencies
+ test function
+ test parameters
+
+Test dependencies are build macros that can be specified to indicate
+the build config in which the test is valid. For example if a test
+depends on a feature that is only enabled by defining a macro. Then
+that macro should be specified as a dependency of the test.
+
+Test function is the function that implements the test steps. This
+function is specified for different tests that perform same steps
+with different parameters.
+
+Test parameters are specified in string form separated by ':'.
+Parameters can be of type string, binary data specified as hex
+string and integer constants specified as integer, macro or
+as an expression. Following is an example test definition:
+
+ AES 128 GCM Encrypt and decrypt 8 bytes
+ depends_on:MBEDTLS_AES_C:MBEDTLS_GCM_C
+ enc_dec_buf:MBEDTLS_CIPHER_AES_128_GCM:"AES-128-GCM":128:8:-1
+
+Test functions:
+---------------
+Test functions are coded in C in test_suite_<module>.function files.
+Functions file is itself not compilable and contains special
+format patterns to specify test suite dependencies, start and end
+of functions and function dependencies. Check any existing functions
+file for example.
+
+Execution:
+----------
+Tests are executed in 3 steps:
+- Generating test_suite_<module>[.<optional sub module>].c file
+ for each corresponding .data file.
+- Building each source file into executables.
+- Running each executable and printing report.
+
+Generating C test source requires more than just the test functions.
+Following extras are required:
+- Process main()
+- Reading .data file and dispatching test cases.
+- Platform specific test case execution
+- Dependency checking
+- Integer expression evaluation
+- Test function dispatch
+
+Build dependencies and integer expressions (in the test parameters)
+are specified as strings in the .data file. Their run time value is
+not known at the generation stage. Hence, they need to be translated
+into run time evaluations. This script generates the run time checks
+for dependencies and integer expressions.
+
+Similarly, function names have to be translated into function calls.
+This script also generates code for function dispatch.
+
+The extra code mentioned here is either generated by this script
+or it comes from the input files: helpers file, platform file and
+the template file.
+
+Helper file:
+------------
+Helpers file contains common helper/utility functions and data.
+
+Platform file:
+--------------
+Platform file contains platform specific setup code and test case
+dispatch code. For example, host_test.function reads test data
+file from host's file system and dispatches tests.
+In case of on-target target_test.function tests are not dispatched
+on target. Target code is kept minimum and only test functions are
+dispatched. Test case dispatch is done on the host using tools like
+Greentea.
+
+Template file:
+--------------
+Template file for example main_test.function is a template C file in
+which generated code and code from input files is substituted to
+generate a compilable C file. It also contains skeleton functions for
+dependency checks, expression evaluation and function dispatch. These
+functions are populated with checks and return codes by this script.
+
+Template file contains "replacement" fields that are formatted
+strings processed by Python string.Template.substitute() method.
+
+This script:
+============
+Core function of this script is to fill the template file with
+code that is generated or read from helpers and platform files.
+
+This script replaces following fields in the template and generates
+the test source file:
+
+$test_common_helpers <-- All common code from helpers.function
+ is substituted here.
+$functions_code <-- Test functions are substituted here
+                          from the input test_suite_xyz.function
+ file. C preprocessor checks are generated
+ for the build dependencies specified
+ in the input file. This script also
+ generates wrappers for the test
+ functions with code to expand the
+ string parameters read from the data
+ file.
+$expression_code <-- This script enumerates the
+ expressions in the .data file and
+ generates code to handle enumerated
+ expression Ids and return the values.
+$dep_check_code <-- This script enumerates all
+ build dependencies and generate
+ code to handle enumerated build
+ dependency Id and return status: if
+ the dependency is defined or not.
+$dispatch_code <-- This script enumerates the functions
+ specified in the input test data file
+ and generates the initializer for the
+ function table in the template
+ file.
+$platform_code <-- Platform specific setup and test
+ dispatch code.
+
+"""
+
+
+import io
+import os
+import re
+import sys
+import string
+import argparse
+
+
+# Markers delimiting the sections of a .function file.
+BEGIN_HEADER_REGEX = r'/\*\s*BEGIN_HEADER\s*\*/'
+END_HEADER_REGEX = r'/\*\s*END_HEADER\s*\*/'
+
+BEGIN_SUITE_HELPERS_REGEX = r'/\*\s*BEGIN_SUITE_HELPERS\s*\*/'
+END_SUITE_HELPERS_REGEX = r'/\*\s*END_SUITE_HELPERS\s*\*/'
+
+BEGIN_DEP_REGEX = r'BEGIN_DEPENDENCIES'
+END_DEP_REGEX = r'END_DEPENDENCIES'
+
+BEGIN_CASE_REGEX = r'/\*\s*BEGIN_CASE\s*(?P<depends_on>.*?)\s*\*/'
+END_CASE_REGEX = r'/\*\s*END_CASE\s*\*/'
+
+# Patterns for dependency declarations and conditional expressions.
+DEPENDENCY_REGEX = r'depends_on:(?P<dependencies>.*)'
+C_IDENTIFIER_REGEX = r'!?[a-z_][a-z0-9_]*'
+CONDITION_OPERATOR_REGEX = r'[!=]=|[<>]=?'
+# forbid 0ddd which might be accidentally octal or accidentally decimal
+CONDITION_VALUE_REGEX = r'[-+]?(0x[0-9a-f]+|0|[1-9][0-9]*)'
+CONDITION_REGEX = r'({})(?:\s*({})\s*({}))?$'.format(C_IDENTIFIER_REGEX,
+                                                     CONDITION_OPERATOR_REGEX,
+                                                     CONDITION_VALUE_REGEX)
+# Patterns for recognizing and checking test function signatures.
+TEST_FUNCTION_VALIDATION_REGEX = r'\s*void\s+(?P<func_name>\w+)\s*\('
+INT_CHECK_REGEX = r'int\s+.*'
+CHAR_CHECK_REGEX = r'char\s*\*\s*.*'
+DATA_T_CHECK_REGEX = r'data_t\s*\*\s*.*'
+FUNCTION_ARG_LIST_END_REGEX = r'.*\)'
+EXIT_LABEL_REGEX = r'^exit:'
+
+
+class GeneratorInputError(Exception):
+    """
+    Exception to indicate error in the input files to this script.
+    This includes missing patterns, test function names and other
+    parsing errors. Error messages typically identify the offending
+    input file and, where available, the line number.
+    """
+    pass
+
+
+class FileWrapper(io.FileIO):
+    """
+    This class extends built-in io.FileIO class with attribute line_no,
+    that indicates line number for the line that is read.
+    """
+
+    def __init__(self, file_name):
+        """
+        Instantiate the base class and initialize the line number to 0.
+
+        :param file_name: File path to open.
+        """
+        super(FileWrapper, self).__init__(file_name, 'r')
+        self._line_no = 0  # number of lines read so far
+
+    def next(self):
+        """
+        Python 2 iterator method. This method overrides base class's
+        next method and extends the next method to count the line
+        numbers as each line is read.
+
+        It works for both Python 2 and Python 3 by checking iterator
+        method name in the base iterator object.
+
+        :return: Line read from file.
+        """
+        parent = super(FileWrapper, self)
+        if hasattr(parent, '__next__'):
+            line = parent.__next__()  # Python 3
+        else:
+            line = parent.next()  # Python 2 # pylint: disable=no-member
+        if line is not None:
+            self._line_no += 1
+            # Decode the raw bytes, strip trailing whitespace (including
+            # the original line ending) and append a normalized '\n'.
+            return line.decode(sys.getdefaultencoding()).rstrip() + '\n'
+        return None
+
+    # Python 3 iterator method
+    __next__ = next
+
+    def get_line_no(self):
+        """
+        Gives current line number.
+        """
+        return self._line_no
+
+    line_no = property(get_line_no)
+
+
+def split_dep(dep):
+    """
+    Split NOT character '!' from dependency. Used by gen_dependencies()
+
+    :param dep: Single dependency string, optionally prefixed with '!'.
+    :return: string tuple. Ex: ('!', MACRO) for !MACRO and ('', MACRO) for
+             MACRO.
+    """
+    return ('!', dep[1:]) if dep[0] == '!' else ('', dep)
+
+
+def gen_dependencies(dependencies):
+    """
+    Test suite data and functions specifies compile time dependencies.
+    This function generates C preprocessor code from the input
+    dependency list. Caller uses the generated preprocessor code to
+    wrap dependent code.
+    A dependency in the input list can have a leading '!' character
+    to negate a condition. '!' is separated from the dependency using
+    function split_dep() and proper preprocessor check is generated
+    accordingly.
+
+    :param dependencies: List of dependencies.
+    :return: if defined and endif code with macro annotations for
+             readability.
+    """
+    dep_start = ''.join(['#if %sdefined(%s)\n' % (x, y) for x, y in
+                         map(split_dep, dependencies)])
+    # Close in reverse order so each #endif comment matches its #if line.
+    dep_end = ''.join(['#endif /* %s */\n' %
+                       x for x in reversed(dependencies)])
+
+    return dep_start, dep_end
+
+
+def gen_dependencies_one_line(dependencies):
+    """
+    Similar to gen_dependencies() but generates dependency checks in one line.
+    Useful for generating code with #else block.
+
+    :param dependencies: List of dependencies.
+    :return: Preprocessor check code (empty string when the dependency
+             list is empty).
+    """
+    defines = '#if ' if dependencies else ''
+    defines += ' && '.join(['%sdefined(%s)' % (x, y) for x, y in map(
+        split_dep, dependencies)])
+    return defines
+
+
+def gen_function_wrapper(name, local_vars, args_dispatch):
+    """
+    Creates test function wrapper code. A wrapper has the code to
+    unpack parameters from parameters[] array.
+
+    :param name: Test function name
+    :param local_vars: Local variables declaration code
+    :param args_dispatch: List of dispatch arguments.
+           Ex: ['(char *)params[0]', '*((int *)params[1])']
+    :return: Test function wrapper.
+    """
+    # Create the wrapper; mark params unused when there are no arguments.
+    wrapper = '''
+void {name}_wrapper( void ** params )
+{{
+{unused_params}{locals}
+    {name}( {args} );
+}}
+'''.format(name=name,
+           unused_params='' if args_dispatch else '    (void)params;\n',
+           args=', '.join(args_dispatch),
+           locals=local_vars)
+    return wrapper
+
+
+def gen_dispatch(name, dependencies):
+    """
+    Test suite code template main_test.function defines a C function
+    array to contain test case functions. This function generates an
+    initializer entry for a function in that array. The entry is
+    composed of a compile time check for the test function
+    dependencies. At compile time the test function is assigned when
+    dependencies are met, else NULL is assigned.
+
+    :param name: Test function name
+    :param dependencies: List of dependencies
+    :return: Dispatch code.
+    """
+    if dependencies:
+        preprocessor_check = gen_dependencies_one_line(dependencies)
+        # NULL keeps the function table slot aligned when the check fails.
+        dispatch_code = '''
+{preprocessor_check}
+    {name}_wrapper,
+#else
+    NULL,
+#endif
+'''.format(preprocessor_check=preprocessor_check, name=name)
+    else:
+        dispatch_code = '''
+    {name}_wrapper,
+'''.format(name=name)
+
+    return dispatch_code
+
+
+def parse_until_pattern(funcs_f, end_regex):
+    """
+    Matches pattern end_regex to the lines read from the file object.
+    Returns the lines read until end pattern is matched.
+
+    :param funcs_f: file object for .function file
+    :param end_regex: Pattern to stop parsing
+    :return: Lines read before the end pattern
+    """
+    # Start with a #line directive so C compiler diagnostics point back
+    # to the original .function file location.
+    headers = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
+    for line in funcs_f:
+        if re.search(end_regex, line):
+            break
+        headers += line
+    else:
+        # for..else: EOF reached without seeing the end marker.
+        raise GeneratorInputError("file: %s - end pattern [%s] not found!" %
+                                  (funcs_f.name, end_regex))
+
+    return headers
+
+
+def validate_dependency(dependency):
+    """
+    Validates a C macro and raises GeneratorInputError on invalid input.
+    :param dependency: Input macro dependency
+    :return: input dependency stripped of leading & trailing white spaces.
+    """
+    dependency = dependency.strip()
+    # re.I makes the identifier/value check case-insensitive.
+    if not re.match(CONDITION_REGEX, dependency, re.I):
+        raise GeneratorInputError('Invalid dependency %s' % dependency)
+    return dependency
+
+
+def parse_dependencies(inp_str):
+    """
+    Parses dependencies out of inp_str, validates them and returns a
+    list of macros.
+
+    :param inp_str: Input string with macros delimited by ':'.
+    :return: list of dependencies
+    :raises GeneratorInputError: if any dependency fails validation.
+    """
+    dependencies = list(map(validate_dependency, inp_str.split(':')))
+    return dependencies
+
+
+def parse_suite_dependencies(funcs_f):
+    """
+    Parses test suite dependencies specified at the top of a
+    .function file, that starts with pattern BEGIN_DEPENDENCIES
+    and end with END_DEPENDENCIES. Dependencies are specified
+    after pattern 'depends_on:' and are delimited by ':'.
+
+    :param funcs_f: file object for .function file
+    :return: List of test suite dependencies.
+    """
+    dependencies = []
+    for line in funcs_f:
+        match = re.search(DEPENDENCY_REGEX, line.strip())
+        if match:
+            try:
+                dependencies = parse_dependencies(match.group('dependencies'))
+            except GeneratorInputError as error:
+                # Re-raise with the file name and line number appended.
+                raise GeneratorInputError(
+                    str(error) + " - %s:%d" % (funcs_f.name, funcs_f.line_no))
+        if re.search(END_DEP_REGEX, line):
+            break
+    else:
+        # for..else: EOF reached without seeing END_DEPENDENCIES.
+        raise GeneratorInputError("file: %s - end dependency pattern [%s]"
+                                  " not found!" % (funcs_f.name,
+                                                   END_DEP_REGEX))
+
+    return dependencies
+
+
+def parse_function_dependencies(line):
+    """
+    Parses function dependencies, that are in the same line as
+    comment BEGIN_CASE. Dependencies are specified after pattern
+    'depends_on:' and are delimited by ':'.
+
+    :param line: Line from .function file that has dependencies.
+    :return: List of dependencies.
+    """
+    dependencies = []
+    # NOTE(review): assumes `line` matches BEGIN_CASE_REGEX (the caller
+    # only invokes this after matching it); otherwise `match` is None.
+    match = re.search(BEGIN_CASE_REGEX, line)
+    dep_str = match.group('depends_on')
+    if dep_str:
+        match = re.search(DEPENDENCY_REGEX, dep_str)
+        if match:
+            dependencies += parse_dependencies(match.group('dependencies'))
+
+    return dependencies
+
+
+def parse_function_arguments(line):
+    """
+    Parses test function signature for validation and generates
+    a dispatch wrapper function that translates input test vectors
+    read from the data file into test function arguments.
+
+    :param line: Line from .function file that has a function
+                 signature.
+    :return: argument list, local variables for
+             wrapper function and argument dispatch code.
+    """
+    args = []
+    local_vars = ''
+    args_dispatch = []
+    arg_idx = 0
+    # Remove characters before arguments
+    line = line[line.find('(') + 1:]
+    # Process arguments, ex: <type> arg1, <type> arg2 )
+    # This script assumes that the argument list is terminated by ')'
+    # i.e. the test functions will not have a function pointer
+    # argument.
+    for arg in line[:line.find(')')].split(','):
+        arg = arg.strip()
+        if arg == '':
+            continue
+        if re.search(INT_CHECK_REGEX, arg.strip()):
+            args.append('int')
+            args_dispatch.append('*( (int *) params[%d] )' % arg_idx)
+        elif re.search(CHAR_CHECK_REGEX, arg.strip()):
+            args.append('char*')
+            args_dispatch.append('(char *) params[%d]' % arg_idx)
+        elif re.search(DATA_T_CHECK_REGEX, arg.strip()):
+            args.append('hex')
+            # create a structure
+            pointer_initializer = '(uint8_t *) params[%d]' % arg_idx
+            len_initializer = '*( (uint32_t *) params[%d] )' % (arg_idx+1)
+            local_vars += """    data_t data%d = {%s, %s};
+""" % (arg_idx, pointer_initializer, len_initializer)
+
+            args_dispatch.append('&data%d' % arg_idx)
+            # A hex argument consumes two params[] slots:
+            # the data pointer and its length.
+            arg_idx += 1
+        else:
+            raise ValueError("Test function arguments can only be 'int', "
+                             "'char *' or 'data_t'\n%s" % line)
+        arg_idx += 1
+
+    return args, local_vars, args_dispatch
+
+
+def generate_function_code(name, code, local_vars, args_dispatch,
+                           dependencies):
+    """
+    Generate function code with preprocessor checks and parameter dispatch
+    wrapper.
+
+    :param name: Function name
+    :param code: Function code
+    :param local_vars: Local variables for function wrapper
+    :param args_dispatch: Argument dispatch code
+    :param dependencies: Preprocessor dependencies list
+    :return: Final function code
+    """
+    # Add exit label if not present
+    if code.find('exit:') == -1:
+        split_code = code.rsplit('}', 1)
+        if len(split_code) == 2:
+            # Insert the label just before the function's closing brace.
+            code = """exit:
+    ;
+}""".join(split_code)
+
+    code += gen_function_wrapper(name, local_vars, args_dispatch)
+    preprocessor_check_start, preprocessor_check_end = \
+        gen_dependencies(dependencies)
+    return preprocessor_check_start + code + preprocessor_check_end
+
+
+def parse_function_code(funcs_f, dependencies, suite_dependencies):
+ """
+ Parses out a function from function file object and generates
+ function and dispatch code.
+
+ :param funcs_f: file object of the functions file.
+ :param dependencies: List of dependencies
+ :param suite_dependencies: List of test suite dependencies
+ :return: Function name, arguments, function code and dispatch code.
+ """
+ line_directive = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
+ code = ''
+ has_exit_label = False
+ for line in funcs_f:
+ # Check function signature. Function signature may be split
+ # across multiple lines. Here we try to find the start of
+ # arguments list, then remove '\n's and apply the regex to
+ # detect function start.
+ up_to_arg_list_start = code + line[:line.find('(') + 1]
+ match = re.match(TEST_FUNCTION_VALIDATION_REGEX,
+ up_to_arg_list_start.replace('\n', ' '), re.I)
+ if match:
+ # check if we have full signature i.e. split in more lines
+ name = match.group('func_name')
+ if not re.match(FUNCTION_ARG_LIST_END_REGEX, line):
+ for lin in funcs_f:
+ line += lin
+ if re.search(FUNCTION_ARG_LIST_END_REGEX, line):
+ break
+ args, local_vars, args_dispatch = parse_function_arguments(
+ line)
+ code += line
+ break
+ code += line
+ else:
+ raise GeneratorInputError("file: %s - Test functions not found!" %
+ funcs_f.name)
+
+ # Prefix test function name with 'test_'
+ code = code.replace(name, 'test_' + name, 1)
+ name = 'test_' + name
+
+ for line in funcs_f:
+ if re.search(END_CASE_REGEX, line):
+ break
+ if not has_exit_label:
+ has_exit_label = \
+ re.search(EXIT_LABEL_REGEX, line.strip()) is not None
+ code += line
+ else:
+ raise GeneratorInputError("file: %s - end case pattern [%s] not "
+ "found!" % (funcs_f.name, END_CASE_REGEX))
+
+ code = line_directive + code
+ code = generate_function_code(name, code, local_vars, args_dispatch,
+ dependencies)
+ dispatch_code = gen_dispatch(name, suite_dependencies + dependencies)
+ return (name, args, code, dispatch_code)
+
+
+def parse_functions(funcs_f):
+ """
+ Parses a test_suite_xxx.function file and returns information
+ for generating a C source file for the test suite.
+
+ :param funcs_f: file object of the functions file.
+ :return: List of test suite dependencies, test function dispatch
+ code, function code and a dict with function identifiers
+ and arguments info.
+ """
+ suite_helpers = ''
+ suite_dependencies = []
+ suite_functions = ''
+ func_info = {}
+ function_idx = 0
+ dispatch_code = ''
+ for line in funcs_f:
+ if re.search(BEGIN_HEADER_REGEX, line):
+ suite_helpers += parse_until_pattern(funcs_f, END_HEADER_REGEX)
+ elif re.search(BEGIN_SUITE_HELPERS_REGEX, line):
+ suite_helpers += parse_until_pattern(funcs_f,
+ END_SUITE_HELPERS_REGEX)
+ elif re.search(BEGIN_DEP_REGEX, line):
+ suite_dependencies += parse_suite_dependencies(funcs_f)
+ elif re.search(BEGIN_CASE_REGEX, line):
+ try:
+ dependencies = parse_function_dependencies(line)
+ except GeneratorInputError as error:
+ raise GeneratorInputError(
+ "%s:%d: %s" % (funcs_f.name, funcs_f.line_no,
+ str(error)))
+ func_name, args, func_code, func_dispatch =\
+ parse_function_code(funcs_f, dependencies, suite_dependencies)
+ suite_functions += func_code
+ # Generate dispatch code and enumeration info
+ if func_name in func_info:
+ raise GeneratorInputError(
+ "file: %s - function %s re-declared at line %d" %
+ (funcs_f.name, func_name, funcs_f.line_no))
+ func_info[func_name] = (function_idx, args)
+ dispatch_code += '/* Function Id: %d */\n' % function_idx
+ dispatch_code += func_dispatch
+ function_idx += 1
+
+ func_code = (suite_helpers +
+ suite_functions).join(gen_dependencies(suite_dependencies))
+ return suite_dependencies, dispatch_code, func_code, func_info
+
+
+def escaped_split(inp_str, split_char):
+ """
+ Split inp_str on character split_char but ignore if escaped.
+    Since the return value is used to write back to the intermediate
+ data file, any escape characters in the input are retained in the
+ output.
+
+ :param inp_str: String to split
+ :param split_char: Split character
+ :return: List of splits
+ """
+ if len(split_char) > 1:
+ raise ValueError('Expected split character. Found string!')
+ out = re.sub(r'(\\.)|' + split_char,
+ lambda m: m.group(1) or '\n', inp_str,
+ len(inp_str)).split('\n')
+ out = [x for x in out if x]
+ return out
+
+
+def parse_test_data(data_f):
+ """
+ Parses .data file for each test case name, test function name,
+ test dependencies and test arguments. This information is
+ correlated with the test functions file for generating an
+ intermediate data file replacing the strings for test function
+ names, dependencies and integer constant expressions with
+ identifiers. Mainly for optimising space for on-target
+ execution.
+
+ :param data_f: file object of the data file.
+ :return: Generator that yields test name, function name,
+ dependency list and function argument list.
+ """
+ __state_read_name = 0
+ __state_read_args = 1
+ state = __state_read_name
+ dependencies = []
+ name = ''
+ for line in data_f:
+ line = line.strip()
+ # Skip comments
+ if line.startswith('#'):
+ continue
+
+ # Blank line indicates end of test
+ if not line:
+ if state == __state_read_args:
+ raise GeneratorInputError("[%s:%d] Newline before arguments. "
+ "Test function and arguments "
+ "missing for %s" %
+ (data_f.name, data_f.line_no, name))
+ continue
+
+ if state == __state_read_name:
+ # Read test name
+ name = line
+ state = __state_read_args
+ elif state == __state_read_args:
+ # Check dependencies
+ match = re.search(DEPENDENCY_REGEX, line)
+ if match:
+ try:
+ dependencies = parse_dependencies(
+ match.group('dependencies'))
+ except GeneratorInputError as error:
+ raise GeneratorInputError(
+ str(error) + " - %s:%d" %
+ (data_f.name, data_f.line_no))
+ else:
+ # Read test vectors
+ parts = escaped_split(line, ':')
+ test_function = parts[0]
+ args = parts[1:]
+ yield name, test_function, dependencies, args
+ dependencies = []
+ state = __state_read_name
+ if state == __state_read_args:
+ raise GeneratorInputError("[%s:%d] Newline before arguments. "
+ "Test function and arguments missing for "
+ "%s" % (data_f.name, data_f.line_no, name))
+
+
+def gen_dep_check(dep_id, dep):
+ """
+ Generate code for checking dependency with the associated
+ identifier.
+
+ :param dep_id: Dependency identifier
+ :param dep: Dependency macro
+ :return: Dependency check code
+ """
+ if dep_id < 0:
+        raise GeneratorInputError("Dependency Id should be a non-negative "
+ "integer.")
+ _not, dep = ('!', dep[1:]) if dep[0] == '!' else ('', dep)
+ if not dep:
+ raise GeneratorInputError("Dependency should not be an empty string.")
+
+ dependency = re.match(CONDITION_REGEX, dep, re.I)
+ if not dependency:
+ raise GeneratorInputError('Invalid dependency %s' % dep)
+
+ _defined = '' if dependency.group(2) else 'defined'
+ _cond = dependency.group(2) if dependency.group(2) else ''
+ _value = dependency.group(3) if dependency.group(3) else ''
+
+ dep_check = '''
+ case {id}:
+ {{
+#if {_not}{_defined}({macro}{_cond}{_value})
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }}
+ break;'''.format(_not=_not, _defined=_defined,
+ macro=dependency.group(1), id=dep_id,
+ _cond=_cond, _value=_value)
+ return dep_check
+
+
+def gen_expression_check(exp_id, exp):
+ """
+ Generates code for evaluating an integer expression using
+ associated expression Id.
+
+ :param exp_id: Expression Identifier
+ :param exp: Expression/Macro
+ :return: Expression check code
+ """
+ if exp_id < 0:
+        raise GeneratorInputError("Expression Id should be a non-negative "
+ "integer.")
+ if not exp:
+ raise GeneratorInputError("Expression should not be an empty string.")
+ exp_code = '''
+ case {exp_id}:
+ {{
+ *out_value = {expression};
+ }}
+ break;'''.format(exp_id=exp_id, expression=exp)
+ return exp_code
+
+
+def write_dependencies(out_data_f, test_dependencies, unique_dependencies):
+ """
+ Write dependencies to intermediate test data file, replacing
+ the string form with identifiers. Also, generates dependency
+ check code.
+
+ :param out_data_f: Output intermediate data file
+ :param test_dependencies: Dependencies
+ :param unique_dependencies: Mutable list to track unique dependencies
+ that are global to this re-entrant function.
+ :return: returns dependency check code.
+ """
+ dep_check_code = ''
+ if test_dependencies:
+ out_data_f.write('depends_on')
+ for dep in test_dependencies:
+ if dep not in unique_dependencies:
+ unique_dependencies.append(dep)
+ dep_id = unique_dependencies.index(dep)
+ dep_check_code += gen_dep_check(dep_id, dep)
+ else:
+ dep_id = unique_dependencies.index(dep)
+ out_data_f.write(':' + str(dep_id))
+ out_data_f.write('\n')
+ return dep_check_code
+
+
+def write_parameters(out_data_f, test_args, func_args, unique_expressions):
+ """
+ Writes test parameters to the intermediate data file, replacing
+ the string form with identifiers. Also, generates expression
+ check code.
+
+ :param out_data_f: Output intermediate data file
+ :param test_args: Test parameters
+ :param func_args: Function arguments
+ :param unique_expressions: Mutable list to track unique
+ expressions that are global to this re-entrant function.
+ :return: Returns expression check code.
+ """
+ expression_code = ''
+ for i, _ in enumerate(test_args):
+ typ = func_args[i]
+ val = test_args[i]
+
+ # check if val is a non literal int val (i.e. an expression)
+ if typ == 'int' and not re.match(r'(\d+|0x[0-9a-f]+)$',
+ val, re.I):
+ typ = 'exp'
+ if val not in unique_expressions:
+ unique_expressions.append(val)
+ # exp_id can be derived from len(). But for
+ # readability and consistency with case of existing
+ # let's use index().
+ exp_id = unique_expressions.index(val)
+ expression_code += gen_expression_check(exp_id, val)
+ val = exp_id
+ else:
+ val = unique_expressions.index(val)
+ out_data_f.write(':' + typ + ':' + str(val))
+ out_data_f.write('\n')
+ return expression_code
+
+
+def gen_suite_dep_checks(suite_dependencies, dep_check_code, expression_code):
+ """
+ Generates preprocessor checks for test suite dependencies.
+
+ :param suite_dependencies: Test suite dependencies read from the
+ .function file.
+ :param dep_check_code: Dependency check code
+ :param expression_code: Expression check code
+ :return: Dependency and expression code guarded by test suite
+ dependencies.
+ """
+ if suite_dependencies:
+ preprocessor_check = gen_dependencies_one_line(suite_dependencies)
+ dep_check_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=dep_check_code)
+ expression_code = '''
+{preprocessor_check}
+{code}
+#endif
+'''.format(preprocessor_check=preprocessor_check, code=expression_code)
+ return dep_check_code, expression_code
+
+
+def gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies):
+ """
+ This function reads test case name, dependencies and test vectors
+ from the .data file. This information is correlated with the test
+ functions file for generating an intermediate data file replacing
+ the strings for test function names, dependencies and integer
+ constant expressions with identifiers. Mainly for optimising
+ space for on-target execution.
+ It also generates test case dependency check code and expression
+ evaluation code.
+
+ :param data_f: Data file object
+ :param out_data_f: Output intermediate data file
+ :param func_info: Dict keyed by function and with function id
+ and arguments info
+ :param suite_dependencies: Test suite dependencies
+ :return: Returns dependency and expression check code
+ """
+ unique_dependencies = []
+ unique_expressions = []
+ dep_check_code = ''
+ expression_code = ''
+ for test_name, function_name, test_dependencies, test_args in \
+ parse_test_data(data_f):
+ out_data_f.write(test_name + '\n')
+
+ # Write dependencies
+ dep_check_code += write_dependencies(out_data_f, test_dependencies,
+ unique_dependencies)
+
+ # Write test function name
+ test_function_name = 'test_' + function_name
+ if test_function_name not in func_info:
+ raise GeneratorInputError("Function %s not found!" %
+ test_function_name)
+ func_id, func_args = func_info[test_function_name]
+ out_data_f.write(str(func_id))
+
+ # Write parameters
+ if len(test_args) != len(func_args):
+ raise GeneratorInputError("Invalid number of arguments in test "
+ "%s. See function %s signature." %
+ (test_name, function_name))
+ expression_code += write_parameters(out_data_f, test_args, func_args,
+ unique_expressions)
+
+ # Write a newline as test case separator
+ out_data_f.write('\n')
+
+ dep_check_code, expression_code = gen_suite_dep_checks(
+ suite_dependencies, dep_check_code, expression_code)
+ return dep_check_code, expression_code
+
+
+def add_input_info(funcs_file, data_file, template_file,
+ c_file, snippets):
+ """
+ Add generator input info in snippets.
+
+ :param funcs_file: Functions file object
+ :param data_file: Data file object
+ :param template_file: Template file object
+ :param c_file: Output C file object
+ :param snippets: Dictionary to contain code pieces to be
+ substituted in the template.
+ :return:
+ """
+ snippets['test_file'] = c_file
+ snippets['test_main_file'] = template_file
+ snippets['test_case_file'] = funcs_file
+ snippets['test_case_data_file'] = data_file
+
+
+def read_code_from_input_files(platform_file, helpers_file,
+ out_data_file, snippets):
+ """
+ Read code from input files and create substitutions for replacement
+ strings in the template file.
+
+ :param platform_file: Platform file object
+ :param helpers_file: Helper functions file object
+ :param out_data_file: Output intermediate data file object
+ :param snippets: Dictionary to contain code pieces to be
+ substituted in the template.
+ :return:
+ """
+ # Read helpers
+ with open(helpers_file, 'r') as help_f, open(platform_file, 'r') as \
+ platform_f:
+ snippets['test_common_helper_file'] = helpers_file
+ snippets['test_common_helpers'] = help_f.read()
+ snippets['test_platform_file'] = platform_file
+ snippets['platform_code'] = platform_f.read().replace(
+ 'DATA_FILE', out_data_file.replace('\\', '\\\\')) # escape '\'
+
+
+def write_test_source_file(template_file, c_file, snippets):
+ """
+ Write output source file with generated source code.
+
+ :param template_file: Template file name
+ :param c_file: Output source file
+ :param snippets: Generated and code snippets
+ :return:
+ """
+ with open(template_file, 'r') as template_f, open(c_file, 'w') as c_f:
+ for line_no, line in enumerate(template_f.readlines(), 1):
+ # Update line number. +1 as #line directive sets next line number
+ snippets['line_no'] = line_no + 1
+ code = string.Template(line).substitute(**snippets)
+ c_f.write(code)
+
+
+def parse_function_file(funcs_file, snippets):
+ """
+ Parse function file and generate function dispatch code.
+
+ :param funcs_file: Functions file name
+ :param snippets: Dictionary to contain code pieces to be
+ substituted in the template.
+ :return:
+ """
+ with FileWrapper(funcs_file) as funcs_f:
+ suite_dependencies, dispatch_code, func_code, func_info = \
+ parse_functions(funcs_f)
+ snippets['functions_code'] = func_code
+ snippets['dispatch_code'] = dispatch_code
+ return suite_dependencies, func_info
+
+
+def generate_intermediate_data_file(data_file, out_data_file,
+ suite_dependencies, func_info, snippets):
+ """
+ Generates intermediate data file from input data file and
+ information read from functions file.
+
+ :param data_file: Data file name
+ :param out_data_file: Output/Intermediate data file
+ :param suite_dependencies: List of suite dependencies.
+ :param func_info: Function info parsed from functions file.
+ :param snippets: Dictionary to contain code pieces to be
+ substituted in the template.
+ :return:
+ """
+ with FileWrapper(data_file) as data_f, \
+ open(out_data_file, 'w') as out_data_f:
+ dep_check_code, expression_code = gen_from_test_data(
+ data_f, out_data_f, func_info, suite_dependencies)
+ snippets['dep_check_code'] = dep_check_code
+ snippets['expression_code'] = expression_code
+
+
+def generate_code(**input_info):
+ """
+ Generates C source code from test suite file, data file, common
+ helpers file and platform file.
+
+ input_info expands to following parameters:
+ funcs_file: Functions file object
+ data_file: Data file object
+ template_file: Template file object
+ platform_file: Platform file object
+ helpers_file: Helper functions file object
+ suites_dir: Test suites dir
+ c_file: Output C file object
+ out_data_file: Output intermediate data file object
+ :return:
+ """
+ funcs_file = input_info['funcs_file']
+ data_file = input_info['data_file']
+ template_file = input_info['template_file']
+ platform_file = input_info['platform_file']
+ helpers_file = input_info['helpers_file']
+ suites_dir = input_info['suites_dir']
+ c_file = input_info['c_file']
+ out_data_file = input_info['out_data_file']
+ for name, path in [('Functions file', funcs_file),
+ ('Data file', data_file),
+ ('Template file', template_file),
+ ('Platform file', platform_file),
+ ('Helpers code file', helpers_file),
+ ('Suites dir', suites_dir)]:
+ if not os.path.exists(path):
+ raise IOError("ERROR: %s [%s] not found!" % (name, path))
+
+ snippets = {'generator_script': os.path.basename(__file__)}
+ read_code_from_input_files(platform_file, helpers_file,
+ out_data_file, snippets)
+ add_input_info(funcs_file, data_file, template_file,
+ c_file, snippets)
+ suite_dependencies, func_info = parse_function_file(funcs_file, snippets)
+ generate_intermediate_data_file(data_file, out_data_file,
+ suite_dependencies, func_info, snippets)
+ write_test_source_file(template_file, c_file, snippets)
+
+
+def main():
+ """
+ Command line parser.
+
+ :return:
+ """
+ parser = argparse.ArgumentParser(
+ description='Dynamically generate test suite code.')
+
+ parser.add_argument("-f", "--functions-file",
+ dest="funcs_file",
+ help="Functions file",
+ metavar="FUNCTIONS_FILE",
+ required=True)
+
+ parser.add_argument("-d", "--data-file",
+ dest="data_file",
+ help="Data file",
+ metavar="DATA_FILE",
+ required=True)
+
+ parser.add_argument("-t", "--template-file",
+ dest="template_file",
+ help="Template file",
+ metavar="TEMPLATE_FILE",
+ required=True)
+
+ parser.add_argument("-s", "--suites-dir",
+ dest="suites_dir",
+ help="Suites dir",
+ metavar="SUITES_DIR",
+ required=True)
+
+ parser.add_argument("--helpers-file",
+ dest="helpers_file",
+ help="Helpers file",
+ metavar="HELPERS_FILE",
+ required=True)
+
+ parser.add_argument("-p", "--platform-file",
+ dest="platform_file",
+ help="Platform code file",
+ metavar="PLATFORM_FILE",
+ required=True)
+
+ parser.add_argument("-o", "--out-dir",
+ dest="out_dir",
+ help="Dir where generated code and scripts are copied",
+ metavar="OUT_DIR",
+ required=True)
+
+ args = parser.parse_args()
+
+ data_file_name = os.path.basename(args.data_file)
+ data_name = os.path.splitext(data_file_name)[0]
+
+ out_c_file = os.path.join(args.out_dir, data_name + '.c')
+ out_data_file = os.path.join(args.out_dir, data_name + '.datax')
+
+ out_c_file_dir = os.path.dirname(out_c_file)
+ out_data_file_dir = os.path.dirname(out_data_file)
+ for directory in [out_c_file_dir, out_data_file_dir]:
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
+ generate_code(funcs_file=args.funcs_file, data_file=args.data_file,
+ template_file=args.template_file,
+ platform_file=args.platform_file,
+ helpers_file=args.helpers_file, suites_dir=args.suites_dir,
+ c_file=out_c_file, out_data_file=out_data_file)
+
+
+if __name__ == "__main__":
+ try:
+ main()
+ except GeneratorInputError as err:
+ sys.exit("%s: input error: %s" %
+ (os.path.basename(sys.argv[0]), str(err)))
diff --git a/tests/scripts/key-exchanges.pl b/tests/scripts/key-exchanges.pl
index d167c67..46f1b97 100755
--- a/tests/scripts/key-exchanges.pl
+++ b/tests/scripts/key-exchanges.pl
@@ -1,8 +1,21 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# key-exchanges.pl
#
-# Copyright (c) 2015-2017, ARM Limited, All Rights Reserved
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -27,7 +40,7 @@
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";
my $sed_cmd = 's/^#define \(MBEDTLS_KEY_EXCHANGE_.*_ENABLED\)/\1/p';
-my $config_h = 'include/mbedtls/config.h';
+my $config_h = 'include/mbedtls/mbedtls_config.h';
my @kexes = split( /\s+/, `sed -n -e '$sed_cmd' $config_h` );
system( "cp $config_h $config_h.bak" ) and die;
@@ -45,12 +58,13 @@
print "\n******************************************\n";
print "* Testing with key exchange: $kex\n";
print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = $kex;
# full config with all key exchanges disabled except one
- system( "scripts/config.pl full" ) and abort "Failed config full\n";
+ system( "scripts/config.py full" ) and abort "Failed config full\n";
for my $k (@kexes) {
next if $k eq $kex;
- system( "scripts/config.pl unset $k" )
+ system( "scripts/config.py unset $k" )
and abort "Failed to disable $k\n";
}
diff --git a/tests/scripts/mbedtls_test.py b/tests/scripts/mbedtls_test.py
new file mode 100755
index 0000000..64f12bb
--- /dev/null
+++ b/tests/scripts/mbedtls_test.py
@@ -0,0 +1,382 @@
+#!/usr/bin/env python3
+
+# Greentea host test script for Mbed TLS on-target test suite testing.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+"""
+Mbed TLS on-target test suite tests are implemented as Greentea
+tests. Greentea tests are implemented in two parts: target test and
+host test. Target test is a C application that is built for the
+target platform and executes on the target. Host test is a Python
+class derived from mbed_host_tests.BaseHostTest. Target communicates
+with the host over serial for the test data and sends back the result.
+
+Python tool mbedgt (Greentea) is responsible for flashing the test
+binary on to the target and dynamically loading this host test module.
+
+Greentea documentation can be found here:
+https://github.com/ARMmbed/greentea
+"""
+
+
+import re
+import os
+import binascii
+
+from mbed_host_tests import BaseHostTest, event_callback # type: ignore # pylint: disable=import-error
+
+
+class TestDataParserError(Exception):
+ """Indicates error in test data, read from .data file."""
+ pass
+
+
+class TestDataParser:
+ """
+ Parses test name, dependencies, test function name and test parameters
+ from the data file.
+ """
+
+ def __init__(self):
+ """
+ Constructor
+ """
+ self.tests = []
+
+ def parse(self, data_file):
+ """
+ Data file parser.
+
+ :param data_file: Data file path
+ """
+ with open(data_file, 'r') as data_f:
+ self.__parse(data_f)
+
+ @staticmethod
+ def __escaped_split(inp_str, split_char):
+ """
+ Splits inp_str on split_char except when escaped.
+
+ :param inp_str: String to split
+ :param split_char: Split character
+ :return: List of splits
+ """
+ split_colon_fn = lambda x: re.sub(r'\\' + split_char, split_char, x)
+ if len(split_char) > 1:
+ raise ValueError('Expected split character. Found string!')
+ out = list(map(split_colon_fn, re.split(r'(?<!\\)' + split_char, inp_str)))
+ out = [x for x in out if x]
+ return out
+
+ def __parse(self, data_f):
+ """
+ Parses data file using supplied file object.
+
+ :param data_f: Data file object
+ :return:
+ """
+ for line in data_f:
+ line = line.strip()
+ if not line:
+ continue
+ # Read test name
+ name = line
+
+ # Check dependencies
+ dependencies = []
+ line = next(data_f).strip()
+ match = re.search('depends_on:(.*)', line)
+ if match:
+ dependencies = [int(x) for x in match.group(1).split(':')]
+ line = next(data_f).strip()
+
+ # Read test vectors
+ line = line.replace('\\n', '\n')
+ parts = self.__escaped_split(line, ':')
+ function_name = int(parts[0])
+ args = parts[1:]
+ args_count = len(args)
+ if args_count % 2 != 0:
+ err_str_fmt = "Number of test arguments({}) should be even: {}"
+ raise TestDataParserError(err_str_fmt.format(args_count, line))
+ grouped_args = [(args[i * 2], args[(i * 2) + 1])
+ for i in range(int(len(args)/2))]
+ self.tests.append((name, function_name, dependencies,
+ grouped_args))
+
+ def get_test_data(self):
+ """
+ Returns test data.
+ """
+ return self.tests
+
+
+class MbedTlsTest(BaseHostTest):
+ """
+ Host test for Mbed TLS unit tests. This script is loaded at
+ run time by Greentea for executing Mbed TLS test suites. Each
+ communication from the target is received in this object as
+ an event, which is then handled by the event handler method
+ decorated by the associated event. Ex: @event_callback('GO').
+
+ Target test sends requests for dispatching next test. It reads
+ tests from the intermediate data file and sends test function
+ identifier, dependency identifiers, expression identifiers and
+    the test data in binary form. Target test checks dependencies,
+    evaluates integer constant expressions and dispatches the test
+ function with received test parameters. After test function is
+ finished, target sends the result. This class handles the result
+ event and prints verdict in the form that Greentea understands.
+
+ """
+ # status/error codes from suites/helpers.function
+ DEPENDENCY_SUPPORTED = 0
+ KEY_VALUE_MAPPING_FOUND = DEPENDENCY_SUPPORTED
+ DISPATCH_TEST_SUCCESS = DEPENDENCY_SUPPORTED
+
+ KEY_VALUE_MAPPING_NOT_FOUND = -1 # Expression Id not found.
+ DEPENDENCY_NOT_SUPPORTED = -2 # Dependency not supported.
+ DISPATCH_TEST_FN_NOT_FOUND = -3 # Test function not found.
+ DISPATCH_INVALID_TEST_DATA = -4 # Invalid parameter type.
+ DISPATCH_UNSUPPORTED_SUITE = -5 # Test suite not supported/enabled.
+
+ def __init__(self):
+ """
+        Constructor initialises test index to -1 (incremented before each test).
+ """
+ super(MbedTlsTest, self).__init__()
+ self.tests = []
+ self.test_index = -1
+ self.dep_index = 0
+ self.suite_passed = True
+ self.error_str = dict()
+ self.error_str[self.DEPENDENCY_SUPPORTED] = \
+ 'DEPENDENCY_SUPPORTED'
+ self.error_str[self.KEY_VALUE_MAPPING_NOT_FOUND] = \
+ 'KEY_VALUE_MAPPING_NOT_FOUND'
+ self.error_str[self.DEPENDENCY_NOT_SUPPORTED] = \
+ 'DEPENDENCY_NOT_SUPPORTED'
+ self.error_str[self.DISPATCH_TEST_FN_NOT_FOUND] = \
+ 'DISPATCH_TEST_FN_NOT_FOUND'
+ self.error_str[self.DISPATCH_INVALID_TEST_DATA] = \
+ 'DISPATCH_INVALID_TEST_DATA'
+ self.error_str[self.DISPATCH_UNSUPPORTED_SUITE] = \
+ 'DISPATCH_UNSUPPORTED_SUITE'
+
+ def setup(self):
+ """
+ Setup hook implementation. Reads test suite data file and parses out
+ tests.
+ """
+ binary_path = self.get_config_item('image_path')
+ script_dir = os.path.split(os.path.abspath(__file__))[0]
+ suite_name = os.path.splitext(os.path.basename(binary_path))[0]
+ data_file = ".".join((suite_name, 'datax'))
+ data_file = os.path.join(script_dir, '..', 'mbedtls',
+ suite_name, data_file)
+ if os.path.exists(data_file):
+ self.log("Running tests from %s" % data_file)
+ parser = TestDataParser()
+ parser.parse(data_file)
+ self.tests = parser.get_test_data()
+ self.print_test_info()
+ else:
+ self.log("Data file not found: %s" % data_file)
+ self.notify_complete(False)
+
+ def print_test_info(self):
+ """
+ Prints test summary read by Greentea to detect test cases.
+ """
+ self.log('{{__testcase_count;%d}}' % len(self.tests))
+ for name, _, _, _ in self.tests:
+ self.log('{{__testcase_name;%s}}' % name)
+
+ @staticmethod
+ def align_32bit(data_bytes):
+ """
+ 4 byte aligns input byte array.
+
+ :return:
+ """
+ data_bytes += bytearray((4 - (len(data_bytes))) % 4)
+
+ @staticmethod
+ def hex_str_bytes(hex_str):
+ """
+ Converts Hex string representation to byte array
+
+ :param hex_str: Hex in string format.
+ :return: Output Byte array
+ """
+ if hex_str[0] != '"' or hex_str[len(hex_str) - 1] != '"':
+ raise TestDataParserError("HEX test parameter missing '\"':"
+ " %s" % hex_str)
+ hex_str = hex_str.strip('"')
+ if len(hex_str) % 2 != 0:
+ raise TestDataParserError("HEX parameter len should be mod of "
+ "2: %s" % hex_str)
+
+ data_bytes = binascii.unhexlify(hex_str)
+ return data_bytes
+
+ @staticmethod
+ def int32_to_big_endian_bytes(i):
+ """
+        Converts i to byte array in big endian format.
+
+ :param i: Input integer
+ :return: Output bytes array in big endian or network order
+ """
+ data_bytes = bytearray([((i >> x) & 0xff) for x in [24, 16, 8, 0]])
+ return data_bytes
+
+ def test_vector_to_bytes(self, function_id, dependencies, parameters):
+ """
+ Converts test vector into a byte array that can be sent to the target.
+
+ :param function_id: Test Function Identifier
+ :param dependencies: Dependency list
+ :param parameters: Test function input parameters
+ :return: Byte array and its length
+ """
+ data_bytes = bytearray([len(dependencies)])
+ if dependencies:
+ data_bytes += bytearray(dependencies)
+ data_bytes += bytearray([function_id, len(parameters)])
+ for typ, param in parameters:
+ if typ in ('int', 'exp'):
+ i = int(param, 0)
+ data_bytes += b'I' if typ == 'int' else b'E'
+ self.align_32bit(data_bytes)
+ data_bytes += self.int32_to_big_endian_bytes(i)
+ elif typ == 'char*':
+ param = param.strip('"')
+ i = len(param) + 1 # + 1 for null termination
+ data_bytes += b'S'
+ self.align_32bit(data_bytes)
+ data_bytes += self.int32_to_big_endian_bytes(i)
+ data_bytes += bytearray(param, encoding='ascii')
+ data_bytes += b'\0' # Null terminate
+ elif typ == 'hex':
+ binary_data = self.hex_str_bytes(param)
+ data_bytes += b'H'
+ self.align_32bit(data_bytes)
+ i = len(binary_data)
+ data_bytes += self.int32_to_big_endian_bytes(i)
+ data_bytes += binary_data
+ length = self.int32_to_big_endian_bytes(len(data_bytes))
+ return data_bytes, length
+
+ def run_next_test(self):
+ """
+ Fetch next test information and execute the test.
+
+ """
+ self.test_index += 1
+ self.dep_index = 0
+ if self.test_index < len(self.tests):
+ name, function_id, dependencies, args = self.tests[self.test_index]
+ self.run_test(name, function_id, dependencies, args)
+ else:
+ self.notify_complete(self.suite_passed)
+
+ def run_test(self, name, function_id, dependencies, args):
+ """
+ Execute the test on target by sending next test information.
+
+ :param name: Test name
+ :param function_id: function identifier
+ :param dependencies: Dependencies list
+ :param args: test parameters
+ :return:
+ """
+ self.log("Running: %s" % name)
+
+ param_bytes, length = self.test_vector_to_bytes(function_id,
+ dependencies, args)
+ self.send_kv(
+ ''.join('{:02x}'.format(x) for x in length),
+ ''.join('{:02x}'.format(x) for x in param_bytes)
+ )
+
+ @staticmethod
+ def get_result(value):
+ """
+ Converts result from string type to integer
+ :param value: Result code in string
+ :return: Integer result code. Value is from the test status
+ constants defined under the MbedTlsTest class.
+ """
+ try:
+ return int(value)
+ except ValueError:
+            raise ValueError("Result should return error number. "
+                             "Instead received %s" % value)
+
+ @event_callback('GO')
+ def on_go(self, _key, _value, _timestamp):
+ """
+ Sent by the target to start first test.
+
+ :param _key: Event key
+ :param _value: Value. ignored
+ :param _timestamp: Timestamp ignored.
+ :return:
+ """
+ self.run_next_test()
+
+ @event_callback("R")
+ def on_result(self, _key, value, _timestamp):
+ """
+ Handle result. Prints test start, finish required by Greentea
+ to detect test execution.
+
+ :param _key: Event key
+ :param value: Value. ignored
+ :param _timestamp: Timestamp ignored.
+ :return:
+ """
+ int_val = self.get_result(value)
+ name, _, _, _ = self.tests[self.test_index]
+ self.log('{{__testcase_start;%s}}' % name)
+ self.log('{{__testcase_finish;%s;%d;%d}}' % (name, int_val == 0,
+ int_val != 0))
+ if int_val != 0:
+ self.suite_passed = False
+ self.run_next_test()
+
+ @event_callback("F")
+ def on_failure(self, _key, value, _timestamp):
+ """
+ Handles test execution failure. That means dependency not supported or
+ Test function not supported. Hence marking test as skipped.
+
+ :param _key: Event key
+ :param value: Value. ignored
+ :param _timestamp: Timestamp ignored.
+ :return:
+ """
+ int_val = self.get_result(value)
+ if int_val in self.error_str:
+ err = self.error_str[int_val]
+ else:
+ err = 'Unknown error'
+ # For skip status, do not write {{__testcase_finish;...}}
+ self.log("Error: %s" % err)
+ self.run_next_test()
diff --git a/tests/scripts/psa_collect_statuses.py b/tests/scripts/psa_collect_statuses.py
new file mode 100755
index 0000000..b086793
--- /dev/null
+++ b/tests/scripts/psa_collect_statuses.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+"""Describe the test coverage of PSA functions in terms of return statuses.
+
+1. Build Mbed Crypto with -DRECORD_PSA_STATUS_COVERAGE_LOG
+2. Run psa_collect_statuses.py
+
+The output is a series of lines of the form "psa_foo PSA_ERROR_XXX". Each
+function/status combination appears only once.
+
+This script must be run from the top of an Mbed Crypto source tree.
+The build command is "make -DRECORD_PSA_STATUS_COVERAGE_LOG", which is
+only supported with make (as opposed to CMake or other build methods).
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+import sys
+
+DEFAULT_STATUS_LOG_FILE = 'tests/statuses.log'
+DEFAULT_PSA_CONSTANT_NAMES = 'programs/psa/psa_constant_names'
+
+class Statuses:
+    """Information about observed return statuses of API functions."""
+
+ def __init__(self):
+ self.functions = {}
+ self.codes = set()
+ self.status_names = {}
+
+ def collect_log(self, log_file_name):
+ """Read logs from RECORD_PSA_STATUS_COVERAGE_LOG.
+
+ Read logs produced by running Mbed Crypto test suites built with
+ -DRECORD_PSA_STATUS_COVERAGE_LOG.
+ """
+ with open(log_file_name) as log:
+ for line in log:
+ value, function, tail = line.split(':', 2)
+ if function not in self.functions:
+ self.functions[function] = {}
+ fdata = self.functions[function]
+ if value not in self.functions[function]:
+ fdata[value] = []
+ fdata[value].append(tail)
+ self.codes.add(int(value))
+
+ def get_constant_names(self, psa_constant_names):
+ """Run psa_constant_names to obtain names for observed numerical values."""
+ values = [str(value) for value in self.codes]
+ cmd = [psa_constant_names, 'status'] + values
+ output = subprocess.check_output(cmd).decode('ascii')
+ for value, name in zip(values, output.rstrip().split('\n')):
+ self.status_names[value] = name
+
+ def report(self):
+ """Report observed return values for each function.
+
+        The report is a series of lines of the form "psa_foo PSA_ERROR_XXX".
+ """
+ for function in sorted(self.functions.keys()):
+ fdata = self.functions[function]
+ names = [self.status_names[value] for value in fdata.keys()]
+ for name in sorted(names):
+ sys.stdout.write('{} {}\n'.format(function, name))
+
+def collect_status_logs(options):
+ """Build and run unit tests and report observed function return statuses.
+
+ Build Mbed Crypto with -DRECORD_PSA_STATUS_COVERAGE_LOG, run the
+ test suites and display information about observed return statuses.
+ """
+ rebuilt = False
+ if not options.use_existing_log and os.path.exists(options.log_file):
+ os.remove(options.log_file)
+ if not os.path.exists(options.log_file):
+ if options.clean_before:
+ subprocess.check_call(['make', 'clean'],
+ cwd='tests',
+ stdout=sys.stderr)
+ with open(os.devnull, 'w') as devnull:
+ make_q_ret = subprocess.call(['make', '-q', 'lib', 'tests'],
+ stdout=devnull, stderr=devnull)
+ if make_q_ret != 0:
+ subprocess.check_call(['make', 'RECORD_PSA_STATUS_COVERAGE_LOG=1'],
+ stdout=sys.stderr)
+ rebuilt = True
+ subprocess.check_call(['make', 'test'],
+ stdout=sys.stderr)
+ data = Statuses()
+ data.collect_log(options.log_file)
+ data.get_constant_names(options.psa_constant_names)
+ if rebuilt and options.clean_after:
+ subprocess.check_call(['make', 'clean'],
+ cwd='tests',
+ stdout=sys.stderr)
+ return data
+
+def main():
+ parser = argparse.ArgumentParser(description=globals()['__doc__'])
+ parser.add_argument('--clean-after',
+ action='store_true',
+ help='Run "make clean" after rebuilding')
+ parser.add_argument('--clean-before',
+ action='store_true',
+ help='Run "make clean" before regenerating the log file)')
+ parser.add_argument('--log-file', metavar='FILE',
+ default=DEFAULT_STATUS_LOG_FILE,
+ help='Log file location (default: {})'.format(
+ DEFAULT_STATUS_LOG_FILE
+ ))
+ parser.add_argument('--psa-constant-names', metavar='PROGRAM',
+ default=DEFAULT_PSA_CONSTANT_NAMES,
+ help='Path to psa_constant_names (default: {})'.format(
+ DEFAULT_PSA_CONSTANT_NAMES
+ ))
+ parser.add_argument('--use-existing-log', '-e',
+ action='store_true',
+ help='Don\'t regenerate the log file if it exists')
+ options = parser.parse_args()
+ data = collect_status_logs(options)
+ data.report()
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/recursion.pl b/tests/scripts/recursion.pl
index 3ad42b1..e4b2d94 100755
--- a/tests/scripts/recursion.pl
+++ b/tests/scripts/recursion.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Find functions making recursive calls to themselves.
# (Multiple recursion where a() calls b() which calls a() not covered.)
@@ -7,6 +7,21 @@
# an unbounded way, those functions should use interation instead.
#
# Typical usage: scripts/recursion.pl library/*.c
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use warnings;
use strict;
diff --git a/tests/scripts/run-test-suites.pl b/tests/scripts/run-test-suites.pl
index 7e2974b..15fa8bc 100755
--- a/tests/scripts/run-test-suites.pl
+++ b/tests/scripts/run-test-suites.pl
@@ -1,22 +1,38 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# run-test-suites.pl
#
-# This file is part of mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# Purpose
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Executes all the available test suites, and provides a basic summary of the
-# results.
-#
-# Usage: run-test-suites.pl [-v]
-#
-# Options :
-# -v|--verbose - Provide a pass/fail/skip breakdown per test suite and
-# in total
-#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+=head1 SYNOPSIS
+
+Execute all the test suites and print a summary of the results.
+
+ run-test-suites.pl [[-v|--verbose] [VERBOSITY]] [--skip=SUITE[...]]
+
+Options:
+
+ -v|--verbose Print detailed failure information.
+ -v 2|--verbose=2 Print detailed failure information and summary messages.
+ -v 3|--verbose=3 Print detailed information about every test case.
+ --skip=SUITE[,SUITE...]
+ Skip the specified SUITE(s). This option can be used
+ multiple times.
+
+=cut
use warnings;
use strict;
@@ -24,21 +40,34 @@
use utf8;
use open qw(:std utf8);
-use constant FALSE => 0;
-use constant TRUE => 1;
+use Getopt::Long qw(:config auto_help gnu_compat);
+use Pod::Usage;
-my $verbose;
-my $switch = shift;
-if ( defined($switch) && ( $switch eq "-v" || $switch eq "--verbose" ) ) {
- $verbose = TRUE;
-}
+my $verbose = 0;
+my @skip_patterns = ();
+GetOptions(
+ 'skip=s' => \@skip_patterns,
+ 'verbose|v:1' => \$verbose,
+ ) or die;
# All test suites = executable files, excluding source files, debug
# and profiling information, etc. We can't just grep {! /\./} because
-#some of our test cases' base names contain a dot.
+# some of our test cases' base names contain a dot.
my @suites = grep { -x $_ || /\.exe$/ } glob 'test_suite_*';
+@suites = grep { !/\.c$/ && !/\.data$/ && -f } @suites;
die "$0: no test suite found\n" unless @suites;
+# "foo" as a skip pattern skips "test_suite_foo" and "test_suite_foo.bar"
+# but not "test_suite_foobar".
+my $skip_re =
+ ( '\Atest_suite_(' .
+ join('|', map {
+ s/[ ,;]/|/g; # allow any of " ,;|" as separators
+ s/\./\./g; # "." in the input means ".", not "any character"
+ $_
+ } @skip_patterns) .
+ ')(\z|\.)' );
+
# in case test suites are linked dynamically
$ENV{'LD_LIBRARY_PATH'} = '../library';
$ENV{'DYLD_LIBRARY_PATH'} = '../library';
@@ -48,27 +77,54 @@
my ($failed_suites, $total_tests_run, $failed, $suite_cases_passed,
$suite_cases_failed, $suite_cases_skipped, $total_cases_passed,
$total_cases_failed, $total_cases_skipped );
+my $suites_skipped = 0;
+
+sub pad_print_center {
+ my( $width, $padchar, $string ) = @_;
+ my $padlen = ( $width - length( $string ) - 2 ) / 2;
+ print $padchar x( $padlen ), " $string ", $padchar x( $padlen ), "\n";
+}
for my $suite (@suites)
{
print "$suite ", "." x ( 72 - length($suite) - 2 - 4 ), " ";
- my $result = `$prefix$suite`;
+ if( $suite =~ /$skip_re/o ) {
+ print "SKIP\n";
+ ++$suites_skipped;
+ next;
+ }
+
+ my $command = "$prefix$suite";
+ if( $verbose ) {
+ $command .= ' -v';
+ }
+ my $result = `$command`;
$suite_cases_passed = () = $result =~ /.. PASS/g;
$suite_cases_failed = () = $result =~ /.. FAILED/g;
$suite_cases_skipped = () = $result =~ /.. ----/g;
- if( $result =~ /PASSED/ ) {
+ if( $? == 0 ) {
print "PASS\n";
+ if( $verbose > 2 ) {
+ pad_print_center( 72, '-', "Begin $suite" );
+ print $result;
+ pad_print_center( 72, '-', "End $suite" );
+ }
} else {
$failed_suites++;
print "FAIL\n";
+ if( $verbose ) {
+ pad_print_center( 72, '-', "Begin $suite" );
+ print $result;
+ pad_print_center( 72, '-', "End $suite" );
+ }
}
my ($passed, $tests, $skipped) = $result =~ /([0-9]*) \/ ([0-9]*) tests.*?([0-9]*) skipped/;
$total_tests_run += $tests - $skipped;
- if ( $verbose ) {
+ if( $verbose > 1 ) {
print "(test cases passed:", $suite_cases_passed,
" failed:", $suite_cases_failed,
" skipped:", $suite_cases_skipped,
@@ -84,9 +140,12 @@
print "-" x 72, "\n";
print $failed_suites ? "FAILED" : "PASSED";
-printf " (%d suites, %d tests run)\n", scalar @suites, $total_tests_run;
+printf( " (%d suites, %d tests run%s)\n",
+ scalar(@suites) - $suites_skipped,
+ $total_tests_run,
+ $suites_skipped ? ", $suites_skipped suites skipped" : "" );
-if ( $verbose ) {
+if( $verbose > 1 ) {
print " test cases passed :", $total_cases_passed, "\n";
print " failed :", $total_cases_failed, "\n";
print " skipped :", $total_cases_skipped, "\n";
@@ -94,8 +153,11 @@
"\n";
print " of available tests :",
( $total_cases_passed + $total_cases_failed + $total_cases_skipped ),
- "\n"
+ "\n";
+ if( $suites_skipped != 0 ) {
+ print "Note: $suites_skipped suites were skipped.\n";
}
+}
exit( $failed_suites ? 1 : 0 );
diff --git a/tests/scripts/scripts_path.py b/tests/scripts/scripts_path.py
new file mode 100644
index 0000000..10bf6f8
--- /dev/null
+++ b/tests/scripts/scripts_path.py
@@ -0,0 +1,28 @@
+"""Add our Python library directory to the module search path.
+
+Usage:
+
+ import scripts_path # pylint: disable=unused-import
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__),
+ os.path.pardir, os.path.pardir,
+ 'scripts'))
diff --git a/tests/scripts/set_psa_test_dependencies.py b/tests/scripts/set_psa_test_dependencies.py
new file mode 100755
index 0000000..7f4ebeb
--- /dev/null
+++ b/tests/scripts/set_psa_test_dependencies.py
@@ -0,0 +1,296 @@
+#!/usr/bin/env python3
+
+"""Edit test cases to use PSA dependencies instead of classic dependencies.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import re
+import sys
+
+CLASSIC_DEPENDENCIES = frozenset([
+ # This list is manually filtered from mbedtls_config.h.
+
+ # Mbed TLS feature support.
+ # Only features that affect what can be done are listed here.
+ # Options that control optimizations or alternative implementations
+ # are omitted.
+ 'MBEDTLS_CIPHER_MODE_CBC',
+ 'MBEDTLS_CIPHER_MODE_CFB',
+ 'MBEDTLS_CIPHER_MODE_CTR',
+ 'MBEDTLS_CIPHER_MODE_OFB',
+ 'MBEDTLS_CIPHER_MODE_XTS',
+ 'MBEDTLS_CIPHER_NULL_CIPHER',
+ 'MBEDTLS_CIPHER_PADDING_PKCS7',
+ 'MBEDTLS_CIPHER_PADDING_ONE_AND_ZEROS',
+ 'MBEDTLS_CIPHER_PADDING_ZEROS_AND_LEN',
+ 'MBEDTLS_CIPHER_PADDING_ZEROS',
+ #curve#'MBEDTLS_ECP_DP_SECP192R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP224R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP256R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP384R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP521R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP192K1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP224K1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_SECP256K1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_BP256R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_BP384R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_BP512R1_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_CURVE25519_ENABLED',
+ #curve#'MBEDTLS_ECP_DP_CURVE448_ENABLED',
+ 'MBEDTLS_ECDSA_DETERMINISTIC',
+ #'MBEDTLS_GENPRIME', #needed for RSA key generation
+ 'MBEDTLS_PKCS1_V15',
+ 'MBEDTLS_PKCS1_V21',
+
+ # Mbed TLS modules.
+ # Only modules that provide cryptographic mechanisms are listed here.
+ # Platform, data formatting, X.509 or TLS modules are omitted.
+ 'MBEDTLS_AES_C',
+ 'MBEDTLS_BIGNUM_C',
+ 'MBEDTLS_CAMELLIA_C',
+ 'MBEDTLS_ARIA_C',
+ 'MBEDTLS_CCM_C',
+ 'MBEDTLS_CHACHA20_C',
+ 'MBEDTLS_CHACHAPOLY_C',
+ 'MBEDTLS_CMAC_C',
+ 'MBEDTLS_CTR_DRBG_C',
+ 'MBEDTLS_DES_C',
+ 'MBEDTLS_DHM_C',
+ 'MBEDTLS_ECDH_C',
+ 'MBEDTLS_ECDSA_C',
+ 'MBEDTLS_ECJPAKE_C',
+ 'MBEDTLS_ECP_C',
+ 'MBEDTLS_ENTROPY_C',
+ 'MBEDTLS_GCM_C',
+ 'MBEDTLS_HKDF_C',
+ 'MBEDTLS_HMAC_DRBG_C',
+ 'MBEDTLS_NIST_KW_C',
+ 'MBEDTLS_MD5_C',
+ 'MBEDTLS_PKCS5_C',
+ 'MBEDTLS_PKCS12_C',
+ 'MBEDTLS_POLY1305_C',
+ 'MBEDTLS_RIPEMD160_C',
+ 'MBEDTLS_RSA_C',
+ 'MBEDTLS_SHA1_C',
+ 'MBEDTLS_SHA256_C',
+ 'MBEDTLS_SHA512_C',
+])
+
+def is_classic_dependency(dep):
+ """Whether dep is a classic dependency that PSA test cases should not use."""
+ if dep.startswith('!'):
+ dep = dep[1:]
+ return dep in CLASSIC_DEPENDENCIES
+
+def is_systematic_dependency(dep):
+ """Whether dep is a PSA dependency which is determined systematically."""
+ if dep.startswith('PSA_WANT_ECC_'):
+ return False
+ return dep.startswith('PSA_WANT_')
+
+WITHOUT_SYSTEMATIC_DEPENDENCIES = frozenset([
+ 'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # only a modifier
+ 'PSA_ALG_ANY_HASH', # only meaningful in policies
+ 'PSA_ALG_KEY_AGREEMENT', # only a way to combine algorithms
+ 'PSA_ALG_TRUNCATED_MAC', # only a modifier
+ 'PSA_KEY_TYPE_NONE', # not a real key type
+ 'PSA_KEY_TYPE_DERIVE', # always supported, don't list it to reduce noise
+ 'PSA_KEY_TYPE_RAW_DATA', # always supported, don't list it to reduce noise
+ 'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', #only a modifier
+ 'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', #only a modifier
+])
+
+SPECIAL_SYSTEMATIC_DEPENDENCIES = {
+ 'PSA_ALG_ECDSA_ANY': frozenset(['PSA_WANT_ALG_ECDSA']),
+ 'PSA_ALG_RSA_PKCS1V15_SIGN_RAW': frozenset(['PSA_WANT_ALG_RSA_PKCS1V15_SIGN']),
+}
+
+def dependencies_of_symbol(symbol):
+ """Return the dependencies for a symbol that designates a cryptographic mechanism."""
+ if symbol in WITHOUT_SYSTEMATIC_DEPENDENCIES:
+ return frozenset()
+ if symbol in SPECIAL_SYSTEMATIC_DEPENDENCIES:
+ return SPECIAL_SYSTEMATIC_DEPENDENCIES[symbol]
+ if symbol.startswith('PSA_ALG_CATEGORY_') or \
+ symbol.startswith('PSA_KEY_TYPE_CATEGORY_'):
+ # Categories are used in test data when an unsupported but plausible
+        # mechanism number is needed. They have no associated dependency.
+ return frozenset()
+ return {symbol.replace('_', '_WANT_', 1)}
+
+def systematic_dependencies(file_name, function_name, arguments):
+ """List the systematically determined dependency for a test case."""
+ deps = set()
+
+ # Run key policy negative tests even if the algorithm to attempt performing
+ # is not supported but in the case where the test is to check an
+ # incompatibility between a requested algorithm for a cryptographic
+ # operation and a key policy. In the latter, we want to filter out the
+    # cases where PSA_ERROR_NOT_SUPPORTED is returned instead of
+ # PSA_ERROR_NOT_PERMITTED.
+ if function_name.endswith('_key_policy') and \
+ arguments[-1].startswith('PSA_ERROR_') and \
+ arguments[-1] != ('PSA_ERROR_NOT_PERMITTED'):
+ arguments[-2] = ''
+ if function_name == 'copy_fail' and \
+ arguments[-1].startswith('PSA_ERROR_'):
+ arguments[-2] = ''
+ arguments[-3] = ''
+
+ # Storage format tests that only look at how the file is structured and
+ # don't care about the format of the key material don't depend on any
+ # cryptographic mechanisms.
+ if os.path.basename(file_name) == 'test_suite_psa_crypto_persistent_key.data' and \
+ function_name in {'format_storage_data_check',
+ 'parse_storage_data_check'}:
+ return []
+
+ for arg in arguments:
+ for symbol in re.findall(r'PSA_(?:ALG|KEY_TYPE)_\w+', arg):
+ deps.update(dependencies_of_symbol(symbol))
+ return sorted(deps)
+
+def updated_dependencies(file_name, function_name, arguments, dependencies):
+ """Rework the list of dependencies into PSA_WANT_xxx.
+
+ Remove classic crypto dependencies such as MBEDTLS_RSA_C,
+ MBEDTLS_PKCS1_V15, etc.
+
+ Add systematic PSA_WANT_xxx dependencies based on the called function and
+ its arguments, replacing existing PSA_WANT_xxx dependencies.
+ """
+ automatic = systematic_dependencies(file_name, function_name, arguments)
+ manual = [dep for dep in dependencies
+ if not (is_systematic_dependency(dep) or
+ is_classic_dependency(dep))]
+ return automatic + manual
+
+def keep_manual_dependencies(file_name, function_name, arguments):
+ #pylint: disable=unused-argument
+ """Declare test functions with unusual dependencies here."""
+ # If there are no arguments, we can't do any useful work. Assume that if
+ # there are dependencies, they are warranted.
+ if not arguments:
+ return True
+ # When PSA_ERROR_NOT_SUPPORTED is expected, usually, at least one of the
+ # constants mentioned in the test should not be supported. It isn't
+ # possible to determine which one in a systematic way. So let the programmer
+ # decide.
+ if arguments[-1] == 'PSA_ERROR_NOT_SUPPORTED':
+ return True
+ return False
+
+def process_data_stanza(stanza, file_name, test_case_number):
+ """Update PSA crypto dependencies in one Mbed TLS test case.
+
+ stanza is the test case text (including the description, the dependencies,
+ the line with the function and arguments, and optionally comments). Return
+ a new stanza with an updated dependency line, preserving everything else
+ (description, comments, arguments, etc.).
+ """
+ if not stanza.lstrip('\n'):
+ # Just blank lines
+ return stanza
+ # Expect 2 or 3 non-comment lines: description, optional dependencies,
+ # function-and-arguments.
+ content_matches = list(re.finditer(r'^[\t ]*([^\t #].*)$', stanza, re.M))
+ if len(content_matches) < 2:
+ raise Exception('Not enough content lines in paragraph {} in {}'
+ .format(test_case_number, file_name))
+ if len(content_matches) > 3:
+ raise Exception('Too many content lines in paragraph {} in {}'
+ .format(test_case_number, file_name))
+ arguments = content_matches[-1].group(0).split(':')
+ function_name = arguments.pop(0)
+ if keep_manual_dependencies(file_name, function_name, arguments):
+ return stanza
+ if len(content_matches) == 2:
+ # Insert a line for the dependencies. If it turns out that there are
+ # no dependencies, we'll remove that empty line below.
+ dependencies_location = content_matches[-1].start()
+ text_before = stanza[:dependencies_location]
+ text_after = '\n' + stanza[dependencies_location:]
+ old_dependencies = []
+ dependencies_leader = 'depends_on:'
+ else:
+ dependencies_match = content_matches[-2]
+ text_before = stanza[:dependencies_match.start()]
+ text_after = stanza[dependencies_match.end():]
+ old_dependencies = dependencies_match.group(0).split(':')
+ dependencies_leader = old_dependencies.pop(0) + ':'
+ if dependencies_leader != 'depends_on:':
+ raise Exception('Next-to-last line does not start with "depends_on:"'
+ ' in paragraph {} in {}'
+ .format(test_case_number, file_name))
+ new_dependencies = updated_dependencies(file_name, function_name, arguments,
+ old_dependencies)
+ if new_dependencies:
+ stanza = (text_before +
+ dependencies_leader + ':'.join(new_dependencies) +
+ text_after)
+ else:
+ # The dependencies have become empty. Remove the depends_on: line.
+ assert text_after[0] == '\n'
+ stanza = text_before + text_after[1:]
+ return stanza
+
+def process_data_file(file_name, old_content):
+ """Update PSA crypto dependencies in an Mbed TLS test suite data file.
+
+ Process old_content (the old content of the file) and return the new content.
+ """
+ old_stanzas = old_content.split('\n\n')
+ new_stanzas = [process_data_stanza(stanza, file_name, n)
+ for n, stanza in enumerate(old_stanzas, start=1)]
+ return '\n\n'.join(new_stanzas)
+
+def update_file(file_name, old_content, new_content):
+ """Update the given file with the given new content.
+
+ Replace the existing file. The previous version is renamed to *.bak.
+ Don't modify the file if the content was unchanged.
+ """
+ if new_content == old_content:
+ return
+ backup = file_name + '.bak'
+ tmp = file_name + '.tmp'
+ with open(tmp, 'w', encoding='utf-8') as new_file:
+ new_file.write(new_content)
+ os.replace(file_name, backup)
+ os.replace(tmp, file_name)
+
+def process_file(file_name):
+ """Update PSA crypto dependencies in an Mbed TLS test suite data file.
+
+ Replace the existing file. The previous version is renamed to *.bak.
+ Don't modify the file if the content was unchanged.
+ """
+ old_content = open(file_name, encoding='utf-8').read()
+ if file_name.endswith('.data'):
+ new_content = process_data_file(file_name, old_content)
+ else:
+ raise Exception('File type not recognized: {}'
+ .format(file_name))
+ update_file(file_name, old_content, new_content)
+
+def main(args):
+ for file_name in args:
+ process_file(file_name)
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/tests/scripts/tcp_client.pl b/tests/scripts/tcp_client.pl
index 11cbf1b..17f824e 100755
--- a/tests/scripts/tcp_client.pl
+++ b/tests/scripts/tcp_client.pl
@@ -4,6 +4,21 @@
# Usage: tcp_client.pl HOSTNAME PORT DATA1 RESPONSE1
# DATA: hex-encoded data to send to the server
# RESPONSE: regexp that must match the server's response
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
use warnings;
use strict;
diff --git a/tests/scripts/test-ref-configs.pl b/tests/scripts/test-ref-configs.pl
index b07329c..dd24db7 100755
--- a/tests/scripts/test-ref-configs.pl
+++ b/tests/scripts/test-ref-configs.pl
@@ -1,10 +1,21 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# test-ref-configs.pl
#
-# This file is part of mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2013-2016, ARM Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -17,14 +28,15 @@
use strict;
my %configs = (
- 'config-mini-tls1_1.h' => {
- 'compat' => '-m tls1_1 -f \'^DES-CBC3-SHA$\|^TLS-RSA-WITH-3DES-EDE-CBC-SHA$\'',
+ 'config-ccm-psk-tls1_2.h' => {
+ 'compat' => '-m tls1_2 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
+ },
+ 'config-no-entropy.h' => {
},
'config-suite-b.h' => {
'compat' => "-m tls1_2 -f 'ECDHE-ECDSA.*AES.*GCM' -p mbedTLS",
},
- 'config-ccm-psk-tls1_2.h' => {
- 'compat' => '-m tls1_2 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
+ 'config-symmetric-only.h' => {
},
'config-thread.h' => {
'opt' => '-f ECJPAKE.*nolog',
@@ -48,7 +60,7 @@
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";
-my $config_h = 'include/mbedtls/config.h';
+my $config_h = 'include/mbedtls/mbedtls_config.h';
system( "cp $config_h $config_h.bak" ) and die;
sub abort {
@@ -58,6 +70,15 @@
exit 1;
}
+# Create a seedfile for configurations that enable MBEDTLS_ENTROPY_NV_SEED.
+# For test purposes, this doesn't have to be cryptographically random.
+if (!-e "tests/seedfile" || -s "tests/seedfile" < 64) {
+ local *SEEDFILE;
+ open SEEDFILE, ">tests/seedfile" or die;
+ print SEEDFILE "*" x 64 or die;
+ close SEEDFILE or die;
+}
+
while( my ($conf, $data) = each %configs ) {
system( "cp $config_h.bak $config_h" ) and die;
system( "make clean" ) and die;
@@ -65,6 +86,7 @@
print "\n******************************************\n";
print "* Testing configuration: $conf\n";
print "******************************************\n";
+ $ENV{MBEDTLS_TEST_CONFIGURATION} = $conf;
system( "cp configs/$conf $config_h" )
and abort "Failed to activate $conf\n";
diff --git a/tests/scripts/test_config_script.py b/tests/scripts/test_config_script.py
new file mode 100755
index 0000000..e230e3c
--- /dev/null
+++ b/tests/scripts/test_config_script.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+
+"""Test helper for the Mbed TLS configuration file tool
+
+Run config.py with various parameters and write the results to files.
+
+This is a harness to help regression testing, not a functional tester.
+Sample usage:
+
+ test_config_script.py -d old
+ ## Modify config.py and/or mbedtls_config.h ##
+ test_config_script.py -d new
+ diff -ru old new
+"""
+
+## Copyright The Mbed TLS Contributors
+## SPDX-License-Identifier: Apache-2.0
+##
+## Licensed under the Apache License, Version 2.0 (the "License"); you may
+## not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+
+import argparse
+import glob
+import os
+import re
+import shutil
+import subprocess
+
+OUTPUT_FILE_PREFIX = 'config-'
+
+def output_file_name(directory, stem, extension):
+ return os.path.join(directory,
+ '{}{}.{}'.format(OUTPUT_FILE_PREFIX,
+ stem, extension))
+
+def cleanup_directory(directory):
+ """Remove old output files."""
+ for extension in []:
+ pattern = output_file_name(directory, '*', extension)
+ filenames = glob.glob(pattern)
+ for filename in filenames:
+ os.remove(filename)
+
+def prepare_directory(directory):
+ """Create the output directory if it doesn't exist yet.
+
+ If there are old output files, remove them.
+ """
+ if os.path.exists(directory):
+ cleanup_directory(directory)
+ else:
+ os.makedirs(directory)
+
+def guess_presets_from_help(help_text):
+ """Figure out what presets the script supports.
+
+ help_text should be the output from running the script with --help.
+ """
+ # Try the output format from config.py
+ hits = re.findall(r'\{([-\w,]+)\}', help_text)
+ for hit in hits:
+ words = set(hit.split(','))
+ if 'get' in words and 'set' in words and 'unset' in words:
+ words.remove('get')
+ words.remove('set')
+ words.remove('unset')
+ return words
+ # Try the output format from config.pl
+ hits = re.findall(r'\n +([-\w]+) +- ', help_text)
+ if hits:
+ return hits
+ raise Exception("Unable to figure out supported presets. Pass the '-p' option.")
+
+def list_presets(options):
+ """Return the list of presets to test.
+
+ The list is taken from the command line if present, otherwise it is
+ extracted from running the config script with --help.
+ """
+ if options.presets:
+ return re.split(r'[ ,]+', options.presets)
+ else:
+ help_text = subprocess.run([options.script, '--help'],
+ check=False, # config.pl --help returns 255
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT).stdout
+ return guess_presets_from_help(help_text.decode('ascii'))
+
+def run_one(options, args, stem_prefix='', input_file=None):
+ """Run the config script with the given arguments.
+
+ Take the original content from input_file if specified, defaulting
+ to options.input_file if input_file is None.
+
+ Write the following files, where xxx contains stem_prefix followed by
+ a filename-friendly encoding of args:
+ * config-xxx.h: modified file.
+ * config-xxx.out: standard output.
+    * config-xxx.err: standard error.
+ * config-xxx.status: exit code.
+
+ Return ("xxx+", "path/to/config-xxx.h") which can be used as
+ stem_prefix and input_file to call this function again with new args.
+ """
+ if input_file is None:
+ input_file = options.input_file
+ stem = stem_prefix + '-'.join(args)
+ data_filename = output_file_name(options.output_directory, stem, 'h')
+ stdout_filename = output_file_name(options.output_directory, stem, 'out')
+ stderr_filename = output_file_name(options.output_directory, stem, 'err')
+ status_filename = output_file_name(options.output_directory, stem, 'status')
+ shutil.copy(input_file, data_filename)
+ # Pass only the file basename, not the full path, to avoid getting the
+ # directory name in error messages, which would make comparisons
+ # between output directories more difficult.
+ cmd = [os.path.abspath(options.script),
+ '-f', os.path.basename(data_filename)]
+ with open(stdout_filename, 'wb') as out:
+ with open(stderr_filename, 'wb') as err:
+ status = subprocess.call(cmd + args,
+ cwd=options.output_directory,
+ stdin=subprocess.DEVNULL,
+ stdout=out, stderr=err)
+ with open(status_filename, 'w') as status_file:
+ status_file.write('{}\n'.format(status))
+ return stem + "+", data_filename
+
+### A list of symbols to test with.
+### This script currently tests what happens when you change a symbol from
+### having a value to not having a value or vice versa. This is not
+### necessarily useful behavior, and we may not consider it a bug if
+### config.py stops handling that case correctly.
+TEST_SYMBOLS = [
+ 'CUSTOM_SYMBOL', # does not exist
+ 'MBEDTLS_AES_C', # set, no value
+ 'MBEDTLS_MPI_MAX_SIZE', # unset, has a value
+ 'MBEDTLS_NO_UDBL_DIVISION', # unset, in "System support"
+ 'MBEDTLS_PLATFORM_ZEROIZE_ALT', # unset, in "Customisation configuration options"
+]
+
+def run_all(options):
+ """Run all the command lines to test."""
+ presets = list_presets(options)
+ for preset in presets:
+ run_one(options, [preset])
+ for symbol in TEST_SYMBOLS:
+ run_one(options, ['get', symbol])
+ (stem, filename) = run_one(options, ['set', symbol])
+ run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
+ run_one(options, ['--force', 'set', symbol])
+ (stem, filename) = run_one(options, ['set', symbol, 'value'])
+ run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
+ run_one(options, ['--force', 'set', symbol, 'value'])
+ run_one(options, ['unset', symbol])
+
+def main():
+ """Command line entry point."""
+ parser = argparse.ArgumentParser(description=__doc__,
+ formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('-d', metavar='DIR',
+ dest='output_directory', required=True,
+ help="""Output directory.""")
+ parser.add_argument('-f', metavar='FILE',
+ dest='input_file', default='include/mbedtls/mbedtls_config.h',
+ help="""Config file (default: %(default)s).""")
+ parser.add_argument('-p', metavar='PRESET,...',
+ dest='presets',
+ help="""Presets to test (default: guessed from --help).""")
+ parser.add_argument('-s', metavar='FILE',
+ dest='script', default='scripts/config.py',
+ help="""Configuration script (default: %(default)s).""")
+ options = parser.parse_args()
+ prepare_directory(options.output_directory)
+ run_all(options)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/test_generate_test_code.py b/tests/scripts/test_generate_test_code.py
new file mode 100755
index 0000000..9bf66f1
--- /dev/null
+++ b/tests/scripts/test_generate_test_code.py
@@ -0,0 +1,1699 @@
+#!/usr/bin/env python3
+# Unit test for generate_test_code.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Unit tests for generate_test_code.py
+"""
+
+from io import StringIO
+from unittest import TestCase, main as unittest_main
+from unittest.mock import patch
+
+from generate_test_code import gen_dependencies, gen_dependencies_one_line
+from generate_test_code import gen_function_wrapper, gen_dispatch
+from generate_test_code import parse_until_pattern, GeneratorInputError
+from generate_test_code import parse_suite_dependencies
+from generate_test_code import parse_function_dependencies
+from generate_test_code import parse_function_arguments, parse_function_code
+from generate_test_code import parse_functions, END_HEADER_REGEX
+from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split
+from generate_test_code import parse_test_data, gen_dep_check
+from generate_test_code import gen_expression_check, write_dependencies
+from generate_test_code import write_parameters, gen_suite_dep_checks
+from generate_test_code import gen_from_test_data
+
+
+class GenDep(TestCase):
+ """
+ Test suite for function gen_dep()
+ """
+
+ def test_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['DEP1', 'DEP2']
+ dep_start, dep_end = gen_dependencies(dependencies)
+ preprocessor1, preprocessor2 = dep_start.splitlines()
+ endif1, endif2 = dep_end.splitlines()
+ self.assertEqual(preprocessor1, '#if defined(DEP1)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(preprocessor2, '#if defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif1, '#endif /* DEP2 */',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif2, '#endif /* DEP1 */',
+ 'Preprocessor generated incorrectly')
+
+ def test_disabled_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['!DEP1', '!DEP2']
+ dep_start, dep_end = gen_dependencies(dependencies)
+ preprocessor1, preprocessor2 = dep_start.splitlines()
+ endif1, endif2 = dep_end.splitlines()
+ self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(preprocessor2, '#if !defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif1, '#endif /* !DEP2 */',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif2, '#endif /* !DEP1 */',
+ 'Preprocessor generated incorrectly')
+
+ def test_mixed_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['!DEP1', 'DEP2']
+ dep_start, dep_end = gen_dependencies(dependencies)
+ preprocessor1, preprocessor2 = dep_start.splitlines()
+ endif1, endif2 = dep_end.splitlines()
+ self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(preprocessor2, '#if defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif1, '#endif /* DEP2 */',
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(endif2, '#endif /* !DEP1 */',
+ 'Preprocessor generated incorrectly')
+
+ def test_empty_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = []
+ dep_start, dep_end = gen_dependencies(dependencies)
+ self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly')
+ self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly')
+
+ def test_large_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = []
+ count = 10
+ for i in range(count):
+ dependencies.append('DEP%d' % i)
+ dep_start, dep_end = gen_dependencies(dependencies)
+ self.assertEqual(len(dep_start.splitlines()), count,
+ 'Preprocessor generated incorrectly')
+ self.assertEqual(len(dep_end.splitlines()), count,
+ 'Preprocessor generated incorrectly')
+
+
+class GenDepOneLine(TestCase):
+ """
+ Test Suite for testing gen_dependencies_one_line()
+ """
+
+ def test_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['DEP1', 'DEP2']
+ dep_str = gen_dependencies_one_line(dependencies)
+ self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+
+ def test_disabled_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['!DEP1', '!DEP2']
+ dep_str = gen_dependencies_one_line(dependencies)
+ self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+
+ def test_mixed_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = ['!DEP1', 'DEP2']
+ dep_str = gen_dependencies_one_line(dependencies)
+ self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)',
+ 'Preprocessor generated incorrectly')
+
+ def test_empty_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = []
+ dep_str = gen_dependencies_one_line(dependencies)
+ self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly')
+
+ def test_large_dependencies_list(self):
+ """
+ Test that gen_dep() correctly creates dependencies for given
+ dependency list.
+ :return:
+ """
+ dependencies = []
+ count = 10
+ for i in range(count):
+ dependencies.append('DEP%d' % i)
+ dep_str = gen_dependencies_one_line(dependencies)
+ expected = '#if ' + ' && '.join(['defined(%s)' %
+ x for x in dependencies])
+ self.assertEqual(dep_str, expected,
+ 'Preprocessor generated incorrectly')
+
+
+class GenFunctionWrapper(TestCase):
+ """
+ Test Suite for testing gen_function_wrapper()
+ """
+
+ def test_params_unpack(self):
+ """
+ Test that params are properly unpacked in the function call.
+
+ :return:
+ """
+ code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd'))
+ expected = '''
+void test_a_wrapper( void ** params )
+{
+
+ test_a( a, b, c, d );
+}
+'''
+ self.assertEqual(code, expected)
+
+ def test_local(self):
+ """
+ Test that params are properly unpacked in the function call.
+
+ :return:
+ """
+ code = gen_function_wrapper('test_a',
+ 'int x = 1;', ('x', 'b', 'c', 'd'))
+ expected = '''
+void test_a_wrapper( void ** params )
+{
+int x = 1;
+ test_a( x, b, c, d );
+}
+'''
+ self.assertEqual(code, expected)
+
+ def test_empty_params(self):
+ """
+ Test that params are properly unpacked in the function call.
+
+ :return:
+ """
+ code = gen_function_wrapper('test_a', '', ())
+ expected = '''
+void test_a_wrapper( void ** params )
+{
+ (void)params;
+
+ test_a( );
+}
+'''
+ self.assertEqual(code, expected)
+
+
+class GenDispatch(TestCase):
+ """
+ Test suite for testing gen_dispatch()
+ """
+
+ def test_dispatch(self):
+ """
+ Test that dispatch table entry is generated correctly.
+ :return:
+ """
+ code = gen_dispatch('test_a', ['DEP1', 'DEP2'])
+ expected = '''
+#if defined(DEP1) && defined(DEP2)
+ test_a_wrapper,
+#else
+ NULL,
+#endif
+'''
+ self.assertEqual(code, expected)
+
+ def test_empty_dependencies(self):
+ """
+ Test empty dependency list.
+ :return:
+ """
+ code = gen_dispatch('test_a', [])
+ expected = '''
+ test_a_wrapper,
+'''
+ self.assertEqual(code, expected)
+
+
+class StringIOWrapper(StringIO):
+ """
+ file like class to mock file object in tests.
+ """
+ def __init__(self, file_name, data, line_no=0):
+ """
+ Init file handle.
+
+ :param file_name:
+ :param data:
+ :param line_no:
+ """
+ super(StringIOWrapper, self).__init__(data)
+ self.line_no = line_no
+ self.name = file_name
+
+ def next(self):
+ """
+ Iterator method. This override simply delegates to the base
+ class's __next__; unlike readline(), it does not update the
+ line_no counter.
+
+ :return: Line read from file.
+ """
+ parent = super(StringIOWrapper, self)
+ line = parent.__next__()
+ return line
+
+ def readline(self, _length=0):
+ """
+ Wrap the base class readline.
+
+ :param _length: ignored; present only for signature compatibility.
+ :return:
+ """
+ line = super(StringIOWrapper, self).readline()
+ if line is not None:
+ self.line_no += 1
+ return line
+
+
+class ParseUntilPattern(TestCase):
+ """
+ Test Suite for testing parse_until_pattern().
+ """
+
+ def test_suite_headers(self):
+ """
+ Test that suite headers are parsed correctly.
+
+ :return:
+ """
+ data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+ expected = '''#line 1 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data, line_no=0)
+ headers = parse_until_pattern(stream, END_HEADER_REGEX)
+ self.assertEqual(headers, expected)
+
+ def test_line_no(self):
+ """
+ Test that #line is set to correct line no. in source .function file.
+
+ :return:
+ """
+ data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+ offset_line_no = 5
+ expected = '''#line %d "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+''' % (offset_line_no + 1)
+ stream = StringIOWrapper('test_suite_ut.function', data,
+ offset_line_no)
+ headers = parse_until_pattern(stream, END_HEADER_REGEX)
+ self.assertEqual(headers, expected)
+
+ def test_no_end_header_comment(self):
+ """
+ Test that GeneratorInputError is raised when the end header
+ comment is missing.
+ :return:
+ """
+ data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(GeneratorInputError, parse_until_pattern, stream,
+ END_HEADER_REGEX)
+
+
+class ParseSuiteDependencies(TestCase):
+ """
+ Test Suite for testing parse_suite_dependencies().
+ """
+
+ def test_suite_dependencies(self):
+ """
+ Test that suite dependencies are parsed from the comment block.
+ :return:
+ """
+ data = '''
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+ expected = ['MBEDTLS_ECP_C']
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ dependencies = parse_suite_dependencies(stream)
+ self.assertEqual(dependencies, expected)
+
+ def test_no_end_dep_comment(self):
+ """
+ Test that GeneratorInputError is raised when end dep comment is missing.
+ :return:
+ """
+ data = '''
+* depends_on:MBEDTLS_ECP_C
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(GeneratorInputError, parse_suite_dependencies,
+ stream)
+
+ def test_dependencies_split(self):
+ """
+ Test that dependencies are split at ':' with surrounding whitespace stripped.
+ :return:
+ """
+ data = '''
+ * depends_on:MBEDTLS_ECP_C:A:B: C : D :F : G: !H
+ * END_DEPENDENCIES
+ */
+'''
+ expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ dependencies = parse_suite_dependencies(stream)
+ self.assertEqual(dependencies, expected)
+
+
+class ParseFuncDependencies(TestCase):
+ """
+ Test Suite for testing parse_function_dependencies()
+ """
+
+ def test_function_dependencies(self):
+ """
+ Test that parse_function_dependencies() correctly parses function
+ dependencies.
+ :return:
+ """
+ line = '/* BEGIN_CASE ' \
+ 'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
+ expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
+ dependencies = parse_function_dependencies(line)
+ self.assertEqual(dependencies, expected)
+
+ def test_no_dependencies(self):
+ """
+ Test that parse_function_dependencies() correctly parses function
+ dependencies.
+ :return:
+ """
+ line = '/* BEGIN_CASE */'
+ dependencies = parse_function_dependencies(line)
+ self.assertEqual(dependencies, [])
+
+ def test_tolerance(self):
+ """
+ Test that parse_function_dependencies() correctly parses function
+ dependencies.
+ :return:
+ """
+ line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/'
+ dependencies = parse_function_dependencies(line)
+ self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F'])
+
+
+class ParseFuncSignature(TestCase):
+ """
+ Test Suite for parse_function_arguments().
+ """
+
+ def test_int_and_char_params(self):
+ """
+ Test int and char parameters parsing
+ :return:
+ """
+ line = 'void entropy_threshold( char * a, int b, int result )'
+ args, local, arg_dispatch = parse_function_arguments(line)
+ self.assertEqual(args, ['char*', 'int', 'int'])
+ self.assertEqual(local, '')
+ self.assertEqual(arg_dispatch, ['(char *) params[0]',
+ '*( (int *) params[1] )',
+ '*( (int *) params[2] )'])
+
+ def test_hex_params(self):
+ """
+ Test hex parameters parsing
+ :return:
+ """
+ line = 'void entropy_threshold( char * a, data_t * h, int result )'
+ args, local, arg_dispatch = parse_function_arguments(line)
+ self.assertEqual(args, ['char*', 'hex', 'int'])
+ self.assertEqual(local,
+ ' data_t data1 = {(uint8_t *) params[1], '
+ '*( (uint32_t *) params[2] )};\n')
+ self.assertEqual(arg_dispatch, ['(char *) params[0]',
+ '&data1',
+ '*( (int *) params[3] )'])
+
+ def test_unsupported_arg(self):
+ """
+ Test unsupported arguments (not among int, char * and data_t)
+ :return:
+ """
+ line = 'void entropy_threshold( char * a, data_t * h, char result )'
+ self.assertRaises(ValueError, parse_function_arguments, line)
+
+ def test_no_params(self):
+ """
+ Test no parameters.
+ :return:
+ """
+ line = 'void entropy_threshold()'
+ args, local, arg_dispatch = parse_function_arguments(line)
+ self.assertEqual(args, [])
+ self.assertEqual(local, '')
+ self.assertEqual(arg_dispatch, [])
+
+
+class ParseFunctionCode(TestCase):
+ """
+ Test suite for testing parse_function_code()
+ """
+
+ def test_no_function(self):
+ """
+ Test no test function found.
+ :return:
+ """
+ data = '''
+No
+test
+function
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err_msg = 'file: test_suite_ut.function - Test functions not found!'
+ self.assertRaisesRegex(GeneratorInputError, err_msg,
+ parse_function_code, stream, [], [])
+
+ def test_no_end_case_comment(self):
+ """
+ Test missing end case.
+ :return:
+ """
+ data = '''
+void test_func()
+{
+}
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err_msg = r'file: test_suite_ut.function - '\
+ 'end case pattern .*? not found!'
+ self.assertRaisesRegex(GeneratorInputError, err_msg,
+ parse_function_code, stream, [], [])
+
+ @patch("generate_test_code.parse_function_arguments")
+ def test_function_called(self,
+ parse_function_arguments_mock):
+ """
+ Test parse_function_code()
+ :return:
+ """
+ parse_function_arguments_mock.return_value = ([], '', [])
+ data = '''
+void test_func()
+{
+}
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(GeneratorInputError, parse_function_code,
+ stream, [], [])
+ self.assertTrue(parse_function_arguments_mock.called)
+ parse_function_arguments_mock.assert_called_with('void test_func()\n')
+
+ @patch("generate_test_code.gen_dispatch")
+ @patch("generate_test_code.gen_dependencies")
+ @patch("generate_test_code.gen_function_wrapper")
+ @patch("generate_test_code.parse_function_arguments")
+ def test_return(self, parse_function_arguments_mock,
+ gen_function_wrapper_mock,
+ gen_dependencies_mock,
+ gen_dispatch_mock):
+ """
+ Test generated code.
+ :return:
+ """
+ parse_function_arguments_mock.return_value = ([], '', [])
+ gen_function_wrapper_mock.return_value = ''
+ gen_dependencies_mock.side_effect = gen_dependencies
+ gen_dispatch_mock.side_effect = gen_dispatch
+ data = '''
+void func()
+{
+ ba ba black sheep
+ have you any wool
+}
+/* END_CASE */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ name, arg, code, dispatch_code = parse_function_code(stream, [], [])
+
+ self.assertTrue(parse_function_arguments_mock.called)
+ parse_function_arguments_mock.assert_called_with('void func()\n')
+ gen_function_wrapper_mock.assert_called_with('test_func', '', [])
+ self.assertEqual(name, 'test_func')
+ self.assertEqual(arg, [])
+ expected = '''#line 1 "test_suite_ut.function"
+
+void test_func()
+{
+ ba ba black sheep
+ have you any wool
+exit:
+ ;
+}
+'''
+ self.assertEqual(code, expected)
+ self.assertEqual(dispatch_code, "\n test_func_wrapper,\n")
+
+ @patch("generate_test_code.gen_dispatch")
+ @patch("generate_test_code.gen_dependencies")
+ @patch("generate_test_code.gen_function_wrapper")
+ @patch("generate_test_code.parse_function_arguments")
+ def test_with_exit_label(self, parse_function_arguments_mock,
+ gen_function_wrapper_mock,
+ gen_dependencies_mock,
+ gen_dispatch_mock):
+ """
+ Test when exit label is present.
+ :return:
+ """
+ parse_function_arguments_mock.return_value = ([], '', [])
+ gen_function_wrapper_mock.return_value = ''
+ gen_dependencies_mock.side_effect = gen_dependencies
+ gen_dispatch_mock.side_effect = gen_dispatch
+ data = '''
+void func()
+{
+ ba ba black sheep
+ have you any wool
+exit:
+ yes sir yes sir
+ 3 bags full
+}
+/* END_CASE */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ _, _, code, _ = parse_function_code(stream, [], [])
+
+ expected = '''#line 1 "test_suite_ut.function"
+
+void test_func()
+{
+ ba ba black sheep
+ have you any wool
+exit:
+ yes sir yes sir
+ 3 bags full
+}
+'''
+ self.assertEqual(code, expected)
+
+ def test_non_void_function(self):
+ """
+ Test invalid signature (non void).
+ :return:
+ """
+ data = 'int entropy_threshold( char * a, data_t * h, int result )'
+ err_msg = 'file: test_suite_ut.function - Test functions not found!'
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaisesRegex(GeneratorInputError, err_msg,
+ parse_function_code, stream, [], [])
+
+ @patch("generate_test_code.gen_dispatch")
+ @patch("generate_test_code.gen_dependencies")
+ @patch("generate_test_code.gen_function_wrapper")
+ @patch("generate_test_code.parse_function_arguments")
+ def test_functio_name_on_newline(self, parse_function_arguments_mock,
+ gen_function_wrapper_mock,
+ gen_dependencies_mock,
+ gen_dispatch_mock):
+ """
+ Test when the function name is on a separate line from 'void'.
+ :return:
+ """
+ parse_function_arguments_mock.return_value = ([], '', [])
+ gen_function_wrapper_mock.return_value = ''
+ gen_dependencies_mock.side_effect = gen_dependencies
+ gen_dispatch_mock.side_effect = gen_dispatch
+ data = '''
+void
+
+
+func()
+{
+ ba ba black sheep
+ have you any wool
+exit:
+ yes sir yes sir
+ 3 bags full
+}
+/* END_CASE */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ _, _, code, _ = parse_function_code(stream, [], [])
+
+ expected = '''#line 1 "test_suite_ut.function"
+
+void
+
+
+test_func()
+{
+ ba ba black sheep
+ have you any wool
+exit:
+ yes sir yes sir
+ 3 bags full
+}
+'''
+ self.assertEqual(code, expected)
+
+
+class ParseFunction(TestCase):
+ """
+ Test Suite for testing parse_functions()
+ """
+
+ @patch("generate_test_code.parse_until_pattern")
+ def test_begin_header(self, parse_until_pattern_mock):
+ """
+ Test that begin header is checked and parse_until_pattern() is called.
+ :return:
+ """
+ def stop(*_unused):
+ """Stop when parse_until_pattern is called."""
+ raise Exception
+ parse_until_pattern_mock.side_effect = stop
+ data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(Exception, parse_functions, stream)
+ parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
+ self.assertEqual(stream.line_no, 1)
+
+ @patch("generate_test_code.parse_until_pattern")
+ def test_begin_helper(self, parse_until_pattern_mock):
+ """
+ Test that begin helper is checked and parse_until_pattern() is called.
+ :return:
+ """
+ def stop(*_unused):
+ """Stop when parse_until_pattern is called."""
+ raise Exception
+ parse_until_pattern_mock.side_effect = stop
+ data = '''/* BEGIN_SUITE_HELPERS */
+void print_hello_world()
+{
+ printf("Hello World!\n");
+}
+/* END_SUITE_HELPERS */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(Exception, parse_functions, stream)
+ parse_until_pattern_mock.assert_called_with(stream,
+ END_SUITE_HELPERS_REGEX)
+ self.assertEqual(stream.line_no, 1)
+
+ @patch("generate_test_code.parse_suite_dependencies")
+ def test_begin_dep(self, parse_suite_dependencies_mock):
+ """
+ Test that begin dep is checked and parse_suite_dependencies() is
+ called.
+ :return:
+ """
+ def stop(*_unused):
+ """Stop when parse_until_pattern is called."""
+ raise Exception
+ parse_suite_dependencies_mock.side_effect = stop
+ data = '''/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(Exception, parse_functions, stream)
+ parse_suite_dependencies_mock.assert_called_with(stream)
+ self.assertEqual(stream.line_no, 1)
+
+ @patch("generate_test_code.parse_function_dependencies")
+ def test_begin_function_dep(self, func_mock):
+ """
+ Test that begin dep is checked and parse_function_dependencies() is
+ called.
+ :return:
+ """
+ def stop(*_unused):
+ """Stop when parse_until_pattern is called."""
+ raise Exception
+ func_mock.side_effect = stop
+
+ dependencies_str = '/* BEGIN_CASE ' \
+ 'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+ data = '''%svoid test_func()
+{
+}
+''' % dependencies_str
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(Exception, parse_functions, stream)
+ func_mock.assert_called_with(dependencies_str)
+ self.assertEqual(stream.line_no, 1)
+
+ @patch("generate_test_code.parse_function_code")
+ @patch("generate_test_code.parse_function_dependencies")
+ def test_return(self, func_mock1, func_mock2):
+ """
+ Test that begin case is checked and parse_function_code() is called.
+ :return:
+ """
+ func_mock1.return_value = []
+ in_func_code = '''void test_func()
+{
+}
+'''
+ func_dispatch = '''
+ test_func_wrapper,
+'''
+ func_mock2.return_value = 'test_func', [],\
+ in_func_code, func_dispatch
+ dependencies_str = '/* BEGIN_CASE ' \
+ 'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+ data = '''%svoid test_func()
+{
+}
+''' % dependencies_str
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ suite_dependencies, dispatch_code, func_code, func_info = \
+ parse_functions(stream)
+ func_mock1.assert_called_with(dependencies_str)
+ func_mock2.assert_called_with(stream, [], [])
+ self.assertEqual(stream.line_no, 5)
+ self.assertEqual(suite_dependencies, [])
+ expected_dispatch_code = '''/* Function Id: 0 */
+
+ test_func_wrapper,
+'''
+ self.assertEqual(dispatch_code, expected_dispatch_code)
+ self.assertEqual(func_code, in_func_code)
+ self.assertEqual(func_info, {'test_func': (0, [])})
+
+ def test_parsing(self):
+ """
+ Test case parsing.
+ :return:
+ """
+ data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func1()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func2()
+{
+}
+/* END_CASE */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ suite_dependencies, dispatch_code, func_code, func_info = \
+ parse_functions(stream)
+ self.assertEqual(stream.line_no, 23)
+ self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])
+
+ expected_dispatch_code = '''/* Function Id: 0 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+ test_func1_wrapper,
+#else
+ NULL,
+#endif
+/* Function Id: 1 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+ test_func2_wrapper,
+#else
+ NULL,
+#endif
+'''
+ self.assertEqual(dispatch_code, expected_dispatch_code)
+ expected_func_code = '''#if defined(MBEDTLS_ECP_C)
+#line 2 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 13 "test_suite_ut.function"
+void test_func1()
+{
+exit:
+ ;
+}
+
+void test_func1_wrapper( void ** params )
+{
+ (void)params;
+
+ test_func1( );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 19 "test_suite_ut.function"
+void test_func2()
+{
+exit:
+ ;
+}
+
+void test_func2_wrapper( void ** params )
+{
+ (void)params;
+
+ test_func2( );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#endif /* MBEDTLS_ECP_C */
+'''
+ self.assertEqual(func_code, expected_func_code)
+ self.assertEqual(func_info, {'test_func1': (0, []),
+ 'test_func2': (1, [])})
+
+ def test_same_function_name(self):
+ """
+ Test name conflict.
+ :return:
+ """
+ data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+'''
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ self.assertRaises(GeneratorInputError, parse_functions, stream)
+
+
+class EscapedSplit(TestCase):
+ """
+ Test suite for testing escaped_split().
+ Note: Since escaped_split() output is used to write back to the
+ intermediate data file. Any escape characters in the input are
+ retained in the output.
+ """
+
+ def test_invalid_input(self):
+ """
+ Test when input split character is not a character.
+ :return:
+ """
+ self.assertRaises(ValueError, escaped_split, '', 'string')
+
+ def test_empty_string(self):
+ """
+ Test empty string input.
+ :return:
+ """
+ splits = escaped_split('', ':')
+ self.assertEqual(splits, [])
+
+ def test_no_escape(self):
+ """
+ Test with no escape character. The behaviour should be same as
+ str.split()
+ :return:
+ """
+ test_str = 'yahoo:google'
+ splits = escaped_split(test_str, ':')
+ self.assertEqual(splits, test_str.split(':'))
+
+ def test_escaped_input(self):
+ """
+ Test input that has escaped delimiter.
+ :return:
+ """
+ test_str = r'yahoo\:google:facebook'
+ splits = escaped_split(test_str, ':')
+ self.assertEqual(splits, [r'yahoo\:google', 'facebook'])
+
+ def test_escaped_escape(self):
+ """
+ Test input that has escaped delimiter.
+ :return:
+ """
+ test_str = r'yahoo\\:google:facebook'
+ splits = escaped_split(test_str, ':')
+ self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])
+
+ def test_all_at_once(self):
+ """
+ Test input that has escaped delimiter.
+ :return:
+ """
+ test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia'
+ splits = escaped_split(test_str, ':')
+ self.assertEqual(splits, [r'yahoo\\', r'google',
+ r'facebook\:instagram\\',
+ r'bbc\\', r'wikipedia'])
+
+
+class ParseTestData(TestCase):
+ """
+ Test suite for parse test data.
+ """
+
+ def test_parser(self):
+ """
+ Test that tests are parsed correctly from data file.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+Diffie-Hellman full exchange #3
+dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271"
+
+Diffie-Hellman selftest
+dhm_selftest:
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ # List of (name, function_name, dependencies, args)
+ tests = list(parse_test_data(stream))
+ test1, test2, test3, test4 = tests
+ self.assertEqual(test1[0], 'Diffie-Hellman full exchange #1')
+ self.assertEqual(test1[1], 'dhm_do_dhm')
+ self.assertEqual(test1[2], [])
+ self.assertEqual(test1[3], ['10', '"23"', '10', '"5"'])
+
+ self.assertEqual(test2[0], 'Diffie-Hellman full exchange #2')
+ self.assertEqual(test2[1], 'dhm_do_dhm')
+ self.assertEqual(test2[2], [])
+ self.assertEqual(test2[3], ['10', '"93450983094850938450983409623"',
+ '10', '"9345098304850938450983409622"'])
+
+ self.assertEqual(test3[0], 'Diffie-Hellman full exchange #3')
+ self.assertEqual(test3[1], 'dhm_do_dhm')
+ self.assertEqual(test3[2], [])
+ self.assertEqual(test3[3], ['10',
+ '"9345098382739712938719287391879381271"',
+ '10',
+ '"9345098792137312973297123912791271"'])
+
+ self.assertEqual(test4[0], 'Diffie-Hellman selftest')
+ self.assertEqual(test4[1], 'dhm_selftest')
+ self.assertEqual(test4[2], [])
+ self.assertEqual(test4[3], [])
+
+ def test_with_dependencies(self):
+ """
+ Test that tests with dependencies are parsed.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ # List of (name, function_name, dependencies, args)
+ tests = list(parse_test_data(stream))
+ test1, test2 = tests
+ self.assertEqual(test1[0], 'Diffie-Hellman full exchange #1')
+ self.assertEqual(test1[1], 'dhm_do_dhm')
+ self.assertEqual(test1[2], ['YAHOO'])
+ self.assertEqual(test1[3], ['10', '"23"', '10', '"5"'])
+
+ self.assertEqual(test2[0], 'Diffie-Hellman full exchange #2')
+ self.assertEqual(test2[1], 'dhm_do_dhm')
+ self.assertEqual(test2[2], [])
+ self.assertEqual(test2[3], ['10', '"93450983094850938450983409623"',
+ '10', '"9345098304850938450983409622"'])
+
+ def test_no_args(self):
+ """
+ Test GeneratorInputError is raised when test function name and
+ args line is missing.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err = None
+ try:
+ for _, _, _, _ in parse_test_data(stream):
+ pass
+ except GeneratorInputError as err:
+ self.assertEqual(type(err), GeneratorInputError)
+
+ def test_incomplete_data(self):
+ """
+ Test GeneratorInputError is raised when test function name
+ and args line is missing.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err = None
+ try:
+ for _, _, _, _ in parse_test_data(stream):
+ pass
+ except GeneratorInputError as err:
+ self.assertEqual(type(err), GeneratorInputError)
+
+
+class GenDepCheck(TestCase):
+ """
+ Test suite for gen_dep_check(). It is assumed this function is
+ called with valid inputs.
+ """
+
+ def test_gen_dep_check(self):
+ """
+ Test that dependency check code generated correctly.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+#if defined(YAHOO)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;"""
+ out = gen_dep_check(5, 'YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_not_defined_dependency(self):
+ """
+ Test dependency with !.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+#if !defined(YAHOO)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;"""
+ out = gen_dep_check(5, '!YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_empty_dependency(self):
+ """
+ Test invalid dependency input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!')
+
+ def test_negative_dep_id(self):
+ """
+ Test invalid dependency input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO')
+
+
+class GenExpCheck(TestCase):
+ """
+ Test suite for gen_expression_check(). It is assumed this function
+ is called with valid inputs.
+ """
+
+ def test_gen_exp_check(self):
+ """
+ Test that expression check code generated correctly.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+ *out_value = YAHOO;
+ }
+ break;"""
+ out = gen_expression_check(5, 'YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_invalid_expression(self):
+ """
+ Test invalid expression input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')
+
+ def test_negative_exp_id(self):
+ """
+ Test invalid expression id.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_expression_check,
+ -1, 'YAHOO')
+
+
+class WriteDependencies(TestCase):
+ """
+ Test suite for testing write_dependencies.
+ """
+
+ def test_no_test_dependencies(self):
+ """
+ Test when test dependencies input is empty.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = write_dependencies(stream, [], unique_dependencies)
+ self.assertEqual(dep_check_code, '')
+ self.assertEqual(len(unique_dependencies), 0)
+ self.assertEqual(stream.getvalue(), '')
+
+ def test_unique_dep_ids(self):
+ """
+        Test that unique dependencies are assigned unique check IDs.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
+ unique_dependencies)
+ expect_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP3)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 2:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expect_dep_check_code)
+ self.assertEqual(len(unique_dependencies), 3)
+ self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')
+
+ def test_dep_id_repeat(self):
+ """
+        Test that repeated dependencies reuse previously assigned IDs.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = ''
+ dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
+ unique_dependencies)
+ dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
+ unique_dependencies)
+ dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
+ unique_dependencies)
+ expect_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP3)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 2:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expect_dep_check_code)
+ self.assertEqual(len(unique_dependencies), 3)
+ self.assertEqual(stream.getvalue(),
+ 'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')
+
+
+class WriteParams(TestCase):
+ """
+ Test Suite for testing write_parameters().
+ """
+
+ def test_no_params(self):
+ """
+ Test with empty test_args
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream, [], [], unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(), '\n')
+
+ def test_no_exp_param(self):
+ """
+ Test when there is no macro or expression in the params.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
+ '0'],
+ ['char*', 'hex', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0\n')
+
+ def test_hex_format_int_param(self):
+ """
+ Test int parameter in hex format.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream,
+ ['"Yahoo"', '"abcdef00"', '0xAA'],
+ ['char*', 'hex', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')
+
+ def test_with_exp_param(self):
+ """
+ Test when there is macro or expression in the params.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream,
+ ['"Yahoo"', '"abcdef00"', '0',
+ 'MACRO1', 'MACRO2', 'MACRO3'],
+ ['char*', 'hex', 'int',
+ 'int', 'int', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 3)
+ self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;
+ case 2:
+ {
+ *out_value = MACRO3;
+ }
+ break;'''
+ self.assertEqual(expression_code, expected_expression_code)
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
+ ':exp:2\n')
+
+ def test_with_repeat_calls(self):
+ """
+ Test when write_parameter() is called with same macro or expression.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = ''
+ expression_code += write_parameters(stream,
+ ['"Yahoo"', 'MACRO1', 'MACRO2'],
+ ['char*', 'int', 'int'],
+ unique_expressions)
+ expression_code += write_parameters(stream,
+ ['"abcdef00"', 'MACRO2', 'MACRO3'],
+ ['hex', 'int', 'int'],
+ unique_expressions)
+ expression_code += write_parameters(stream,
+ ['0', 'MACRO3', 'MACRO1'],
+ ['int', 'int', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 3)
+ self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;
+ case 2:
+ {
+ *out_value = MACRO3;
+ }
+ break;'''
+ self.assertEqual(expression_code, expected_expression_code)
+ expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
+:hex:"abcdef00":exp:1:exp:2
+:int:0:exp:2:exp:0
+'''
+ self.assertEqual(stream.getvalue(), expected_data_file)
+
+
+class GenTestSuiteDependenciesChecks(TestCase):
+ """
+ Test suite for testing gen_suite_dep_checks()
+ """
+ def test_empty_suite_dependencies(self):
+ """
+ Test with empty suite_dependencies list.
+
+ :return:
+ """
+ dep_check_code, expression_code = \
+ gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
+ self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
+ self.assertEqual(expression_code, 'EXPRESSION_CODE')
+
+ def test_suite_dependencies(self):
+ """
+ Test with suite_dependencies list.
+
+ :return:
+ """
+ dep_check_code, expression_code = \
+ gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
+ 'EXPRESSION_CODE')
+ expected_dep_check_code = '''
+#if defined(SUITE_DEP)
+DEP_CHECK_CODE
+#endif
+'''
+ expected_expression_code = '''
+#if defined(SUITE_DEP)
+EXPRESSION_CODE
+#endif
+'''
+ self.assertEqual(dep_check_code, expected_dep_check_code)
+ self.assertEqual(expression_code, expected_expression_code)
+
+ def test_no_dep_no_exp(self):
+ """
+ Test when there are no dependency and expression code.
+ :return:
+ """
+ dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
+ self.assertEqual(dep_check_code, '')
+ self.assertEqual(expression_code, '')
+
+
+class GenFromTestData(TestCase):
+ """
+ Test suite for gen_from_test_data()
+ """
+
+ @staticmethod
+ @patch("generate_test_code.write_dependencies")
+ @patch("generate_test_code.write_parameters")
+ @patch("generate_test_code.gen_suite_dep_checks")
+ def test_intermediate_data_file(func_mock1,
+ write_parameters_mock,
+ write_dependencies_mock):
+ """
+ Test that intermediate data file is written with expected data.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func1': (1, ('int',))}
+ suite_dependencies = []
+ write_parameters_mock.side_effect = write_parameters
+ write_dependencies_mock.side_effect = write_dependencies
+ func_mock1.side_effect = gen_suite_dep_checks
+ gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
+ write_dependencies_mock.assert_called_with(out_data_f,
+ ['DEP1'], ['DEP1'])
+ write_parameters_mock.assert_called_with(out_data_f, ['0'],
+ ('int',), [])
+ expected_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ func_mock1.assert_called_with(
+ suite_dependencies, expected_dep_check_code, '')
+
+ def test_function_not_found(self):
+ """
+        Test that GeneratorInputError is raised when function info is not found.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func2': (1, ('int',))}
+ suite_dependencies = []
+ self.assertRaises(GeneratorInputError, gen_from_test_data,
+ data_f, out_data_f, func_info, suite_dependencies)
+
+ def test_different_func_args(self):
+ """
+        Test that GeneratorInputError is raised when the number of
+        parameters and function args differ.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func2': (1, ('int', 'hex'))}
+ suite_dependencies = []
+ self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
+ out_data_f, func_info, suite_dependencies)
+
+ def test_output(self):
+ """
+ Test that intermediate data file is written with expected data.
+ :return:
+ """
+ data = '''
+My test 1
+depends_on:DEP1
+func1:0:0xfa:MACRO1:MACRO2
+
+My test 2
+depends_on:DEP1:DEP2
+func2:"yahoo":88:MACRO1
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
+ 'test_func2': (1, ('char*', 'int', 'int'))}
+ suite_dependencies = []
+ dep_check_code, expression_code = \
+ gen_from_test_data(data_f, out_data_f, func_info,
+ suite_dependencies)
+ expected_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ expected_data = '''My test 1
+depends_on:0
+0:int:0:int:0xfa:exp:0:exp:1
+
+My test 2
+depends_on:0:1
+1:char*:"yahoo":int:88:exp:0
+
+'''
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expected_dep_check_code)
+ self.assertEqual(out_data_f.getvalue(), expected_data)
+ self.assertEqual(expression_code, expected_expression_code)
+
+
+if __name__ == '__main__':
+ unittest_main()
diff --git a/tests/scripts/test_psa_constant_names.py b/tests/scripts/test_psa_constant_names.py
new file mode 100755
index 0000000..07c8ab2
--- /dev/null
+++ b/tests/scripts/test_psa_constant_names.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+"""Test the program psa_constant_names.
+Gather constant names from header files and test cases. Compile a C program
+to print out their numerical values, feed these numerical values to
+psa_constant_names, and check that the output is the original name.
+Return 0 if all test cases pass, 1 if the output was not always as expected,
+or 1 (with a Python backtrace) if there was an operational error.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+from collections import namedtuple
+import os
+import re
+import subprocess
+import sys
+from typing import Iterable, List, Optional, Tuple
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import c_build_helper
+from mbedtls_dev.macro_collector import InputsForTest, PSAMacroEnumerator
+from mbedtls_dev import typing_util
+
+def gather_inputs(headers: Iterable[str],
+ test_suites: Iterable[str],
+ inputs_class=InputsForTest) -> PSAMacroEnumerator:
+ """Read the list of inputs to test psa_constant_names with."""
+ inputs = inputs_class()
+ for header in headers:
+ inputs.parse_header(header)
+ for test_cases in test_suites:
+ inputs.parse_test_cases(test_cases)
+ inputs.add_numerical_values()
+ inputs.gather_arguments()
+ return inputs
+
+def run_c(type_word: str,
+ expressions: Iterable[str],
+ include_path: Optional[str] = None,
+ keep_c: bool = False) -> List[str]:
+ """Generate and run a program to print out numerical values of C expressions."""
+ if type_word == 'status':
+ cast_to = 'long'
+ printf_format = '%ld'
+ else:
+ cast_to = 'unsigned long'
+ printf_format = '0x%08lx'
+ return c_build_helper.get_c_expression_values(
+ cast_to, printf_format,
+ expressions,
+ caller='test_psa_constant_names.py for {} values'.format(type_word),
+ file_label=type_word,
+ header='#include <psa/crypto.h>',
+ include_path=include_path,
+ keep_c=keep_c
+ )
+
+NORMALIZE_STRIP_RE = re.compile(r'\s+')
+def normalize(expr: str) -> str:
+ """Normalize the C expression so as not to care about trivial differences.
+
+ Currently "trivial differences" means whitespace.
+ """
+ return re.sub(NORMALIZE_STRIP_RE, '', expr)
+
+def collect_values(inputs: InputsForTest,
+ type_word: str,
+ include_path: Optional[str] = None,
+ keep_c: bool = False) -> Tuple[List[str], List[str]]:
+ """Generate expressions using known macro names and calculate their values.
+
+ Return a list of pairs of (expr, value) where expr is an expression and
+ value is a string representation of its integer value.
+ """
+ names = inputs.get_names(type_word)
+ expressions = sorted(inputs.generate_expressions(names))
+ values = run_c(type_word, expressions,
+ include_path=include_path, keep_c=keep_c)
+ return expressions, values
+
+class Tests:
+ """An object representing tests and their results."""
+
+ Error = namedtuple('Error',
+ ['type', 'expression', 'value', 'output'])
+
+ def __init__(self, options) -> None:
+ self.options = options
+ self.count = 0
+ self.errors = [] #type: List[Tests.Error]
+
+ def run_one(self, inputs: InputsForTest, type_word: str) -> None:
+ """Test psa_constant_names for the specified type.
+
+ Run the program on the names for this type.
+ Use the inputs to figure out what arguments to pass to macros that
+ take arguments.
+ """
+ expressions, values = collect_values(inputs, type_word,
+ include_path=self.options.include,
+ keep_c=self.options.keep_c)
+ output_bytes = subprocess.check_output([self.options.program,
+ type_word] + values)
+ output = output_bytes.decode('ascii')
+ outputs = output.strip().split('\n')
+ self.count += len(expressions)
+ for expr, value, output in zip(expressions, values, outputs):
+ if self.options.show:
+ sys.stdout.write('{} {}\t{}\n'.format(type_word, value, output))
+ if normalize(expr) != normalize(output):
+ self.errors.append(self.Error(type=type_word,
+ expression=expr,
+ value=value,
+ output=output))
+
+ def run_all(self, inputs: InputsForTest) -> None:
+ """Run psa_constant_names on all the gathered inputs."""
+ for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
+ 'key_type', 'key_usage']:
+ self.run_one(inputs, type_word)
+
+ def report(self, out: typing_util.Writable) -> None:
+ """Describe each case where the output is not as expected.
+
+ Write the errors to ``out``.
+ Also write a total.
+ """
+ for error in self.errors:
+ out.write('For {} "{}", got "{}" (value: {})\n'
+ .format(error.type, error.expression,
+ error.output, error.value))
+ out.write('{} test cases'.format(self.count))
+ if self.errors:
+ out.write(', {} FAIL\n'.format(len(self.errors)))
+ else:
+ out.write(' PASS\n')
+
+HEADERS = ['psa/crypto.h', 'psa/crypto_extra.h', 'psa/crypto_values.h']
+TEST_SUITES = ['tests/suites/test_suite_psa_crypto_metadata.data']
+
+def main():
+ parser = argparse.ArgumentParser(description=globals()['__doc__'])
+ parser.add_argument('--include', '-I',
+ action='append', default=['include'],
+ help='Directory for header files')
+ parser.add_argument('--keep-c',
+ action='store_true', dest='keep_c', default=False,
+ help='Keep the intermediate C file')
+ parser.add_argument('--no-keep-c',
+ action='store_false', dest='keep_c',
+ help='Don\'t keep the intermediate C file (default)')
+ parser.add_argument('--program',
+ default='programs/psa/psa_constant_names',
+ help='Program to test')
+ parser.add_argument('--show',
+ action='store_true',
+ help='Show tested values on stdout')
+ parser.add_argument('--no-show',
+ action='store_false', dest='show',
+ help='Don\'t show tested values (default)')
+ options = parser.parse_args()
+ headers = [os.path.join(options.include[0], h) for h in HEADERS]
+ inputs = gather_inputs(headers, TEST_SUITES)
+ tests = Tests(options)
+ tests.run_all(inputs)
+ tests.report(sys.stdout)
+ if tests.errors:
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/test_zeroize.gdb b/tests/scripts/test_zeroize.gdb
index 617ab55..66c6304 100644
--- a/tests/scripts/test_zeroize.gdb
+++ b/tests/scripts/test_zeroize.gdb
@@ -1,8 +1,19 @@
# test_zeroize.gdb
#
-# This file is part of Mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2018, Arm Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
@@ -17,7 +28,7 @@
# seem to be a mechanism to reliably check whether the zeroize calls are being
# eliminated by compiler optimizations from within the compiled program. The
# problem is that a compiler would typically remove what it considers to be
-# "unecessary" assignments as part of redundant code elimination. To identify
+# "unnecessary" assignments as part of redundant code elimination. To identify
# such code, the compilar will create some form dependency graph between
# reads and writes to variables (among other situations). It will then use this
# data structure to remove redundant code that does not have an impact on the
@@ -31,18 +42,13 @@
# the compiler potentially has a bug.
#
# Note: This test requires that the test program is compiled with -g3.
-#
-# WARNING: There does not seem to be a mechanism in GDB scripts to set a
-# breakpoint at the end of a function (probably because there are a lot of
-# complications as function can have multiple exit points, etc). Therefore, it
-# was necessary to hard-code the line number of the breakpoint in the zeroize.c
-# test app. The assumption is that zeroize.c is a simple test app that does not
-# change often (as opposed to the actual library code), so the breakpoint line
-# number does not need to be updated often.
set confirm off
+
file ./programs/test/zeroize
-break zeroize.c:100
+
+search GDB_BREAK_HERE
+break $_
set args ./programs/test/zeroize.c
run
diff --git a/tests/scripts/travis-log-failure.sh b/tests/scripts/travis-log-failure.sh
index 9866ca7..249b3f8 100755
--- a/tests/scripts/travis-log-failure.sh
+++ b/tests/scripts/travis-log-failure.sh
@@ -2,9 +2,20 @@
# travis-log-failure.sh
#
-# This file is part of mbed TLS (https://tls.mbed.org)
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0
#
-# Copyright (c) 2016, ARM Limited, All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
#
# Purpose
#
diff --git a/tests/scripts/yotta-build.sh b/tests/scripts/yotta-build.sh
deleted file mode 100755
index 4bae34a..0000000
--- a/tests/scripts/yotta-build.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/sh
-
-# yotta-build.sh
-#
-# This file is part of mbed TLS (https://tls.mbed.org)
-#
-# Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
-#
-# Purpose
-#
-# To run test builds of the yotta module for all supported targets.
-
-set -eu
-
-check_tools()
-{
- for TOOL in "$@"; do
- if ! `hash "$TOOL" >/dev/null 2>&1`; then
- echo "$TOOL not found!" >&2
- exit 1
- fi
- done
-}
-
-yotta_build()
-{
- TARGET=$1
-
- echo; echo "*** $TARGET (release) ***"
- yt -t $TARGET build
-
- echo; echo "*** $TARGET (debug) ***"
- yt -t $TARGET build -d
-}
-
-# Make sure the tools we need are available.
-check_tools "arm-none-eabi-gcc" "armcc" "yotta"
-
-yotta/create-module.sh
-cd yotta/module
-yt update || true # needs network
-
-if uname -a | grep 'Linux.*x86' >/dev/null; then
- yotta_build x86-linux-native
-fi
-if uname -a | grep 'Darwin.*x86' >/dev/null; then
- yotta_build x86-osx-native
-fi
-
-# armcc build tests.
-yotta_build frdm-k64f-armcc
-#yotta_build nordic-nrf51822-16k-armcc
-
-# arm-none-eabi-gcc build tests.
-yotta_build frdm-k64f-gcc
-#yotta_build st-nucleo-f401re-gcc # dirent
-#yotta_build stm32f429i-disco-gcc # fails in mbed-hal-st-stm32f4
-#yotta_build nordic-nrf51822-16k-gcc # fails in minar-platform
-#yotta_build bbc-microbit-classic-gcc # fails in minar-platform
-#yotta_build st-stm32f439zi-gcc # fails in mbed-hal-st-stm32f4
-#yotta_build st-stm32f429i-disco-gcc # fails in mbed-hal-st-stm32f4