Merge pull request #9225 from gilles-peskine-arm/tls13-debug-print-uint32-fix

Fix uint32_t printed as unsigned int
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 306cf02..e476675 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -323,11 +323,11 @@
                 ${CMAKE_CURRENT_SOURCE_DIR}/tests
             COMMAND
                 "${MBEDTLS_PYTHON_EXECUTABLE}"
-                "${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_keys.py"
+                "${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_keys.py"
                 "--output"
                 "${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_keys.h"
             DEPENDS
-                ${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_keys.py
+                ${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_keys.py
         )
         add_custom_target(test_keys_header DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_keys.h)
         add_custom_command(
@@ -337,11 +337,11 @@
                 ${CMAKE_CURRENT_SOURCE_DIR}/tests
             COMMAND
                 "${MBEDTLS_PYTHON_EXECUTABLE}"
-                "${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_cert_macros.py"
+                "${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_cert_macros.py"
                 "--output"
                 "${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_certs.h"
             DEPENDS
-                ${CMAKE_CURRENT_SOURCE_DIR}/tests/scripts/generate_test_cert_macros.py
+                ${CMAKE_CURRENT_SOURCE_DIR}/framework/scripts/generate_test_cert_macros.py
         )
         add_custom_target(test_certs_header DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tests/src/test_certs.h)
         add_dependencies(mbedtls_test test_keys_header test_certs_header)
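For reference, the relocated generators can also be run by hand from the repository root, which is what the CMake rules above and the Windows batch file later in this patch do. A minimal Python sketch of the equivalent manual invocation, assuming the framework submodule is checked out; the script paths and --output arguments are taken verbatim from the build rules:

```python
# Sketch only: regenerate the test header files by hand from the repo root.
import subprocess
import sys

for script, output in [
    ("framework/scripts/generate_test_keys.py", "tests/src/test_keys.h"),
    ("framework/scripts/generate_test_cert_macros.py", "tests/src/test_certs.h"),
]:
    subprocess.run([sys.executable, script, "--output", output], check=True)
```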
diff --git a/docs/architecture/psa-crypto-implementation-structure.md b/docs/architecture/psa-crypto-implementation-structure.md
index 7e0e37d..0954602 100644
--- a/docs/architecture/psa-crypto-implementation-structure.md
+++ b/docs/architecture/psa-crypto-implementation-structure.md
@@ -153,7 +153,7 @@
 
 ### Unit tests
 
-A number of unit tests are automatically generated by `tests/scripts/generate_psa_tests.py` based on the algorithms and key types declared in `include/psa/crypto_values.h` and `include/psa/crypto_extra.h`:
+A number of unit tests are automatically generated by `framework/scripts/generate_psa_tests.py` based on the algorithms and key types declared in `include/psa/crypto_values.h` and `include/psa/crypto_extra.h`:
 
 * Attempt to create a key with a key type that is not supported.
 * Attempt to perform an operation with a combination of key type and algorithm that is not valid or not supported.
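As an aside, the relocated generator can list the data files it would produce without writing them, via the same `--list` option that `scripts/abi_check.py` uses later in this patch. A minimal sketch, assuming it is run from the repository root with the framework submodule present:

```python
# Sketch only: list the test data files generate_psa_tests.py would produce.
import subprocess
import sys

output = subprocess.check_output(
    [sys.executable, "framework/scripts/generate_psa_tests.py", "--list"],
    universal_newlines=True,
)
print([line for line in output.splitlines() if line])
```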
diff --git a/docs/architecture/psa-shared-memory.md b/docs/architecture/psa-shared-memory.md
index ef3a6b0..283ffc6 100644
--- a/docs/architecture/psa-shared-memory.md
+++ b/docs/architecture/psa-shared-memory.md
@@ -663,7 +663,7 @@
 
 There now exists a more generic mechanism for making exactly this kind of transformation - the PSA test wrappers, which exist in the files `tests/include/test/psa_test_wrappers.h` and `tests/src/psa_test_wrappers.c`. These are wrappers around all PSA functions that allow testing code to be inserted at the start and end of a PSA function call.
 
-The test wrappers are generated by a script, although they are not automatically generated as part of the build process. Instead, they are checked into source control and must be manually updated when functions change by running `tests/scripts/generate_psa_wrappers.py`.
+The test wrappers are generated by a script, although they are not automatically generated as part of the build process. Instead, they are checked into source control and must be manually updated when functions change by running `framework/scripts/generate_psa_wrappers.py`.
 
 Poisoning code is added to these test wrappers where relevant in order to pre-poison and post-unpoison the parameters to the functions.
 
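Because the wrappers are checked in rather than generated at build time, it is easy to forget to refresh them. A minimal sketch of a regenerate-and-compare check, under the assumption that `generate_psa_wrappers.py` rewrites the two checked-in files in place when run without arguments from the repository root; only the script's new location is taken from this patch:

```python
# Sketch only: regenerate the PSA test wrappers and report whether the
# checked-in copies changed (assumes the script updates the files in place).
import subprocess
import sys

subprocess.run([sys.executable, "framework/scripts/generate_psa_wrappers.py"],
               check=True)
changed = subprocess.run(
    ["git", "diff", "--quiet", "--",
     "tests/include/test/psa_test_wrappers.h",
     "tests/src/psa_test_wrappers.c"],
    check=False,
).returncode != 0
print("wrappers out of date" if changed else "wrappers up to date")
```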
diff --git a/framework b/framework
index e156a8e..623c1b4 160000
--- a/framework
+++ b/framework
@@ -1 +1 @@
-Subproject commit e156a8eb8e6db88cdf0a3041fc7f645131eab16d
+Subproject commit 623c1b4532e8de64a5d82ea84a7496e64c370d15
diff --git a/scripts/abi_check.py b/scripts/abi_check.py
index ec0d473..f91d80e 100755
--- a/scripts/abi_check.py
+++ b/scripts/abi_check.py
@@ -326,8 +326,14 @@
     @staticmethod
     def _list_generated_test_data_files(git_worktree_path):
         """List the generated test data files."""
+        generate_psa_tests = 'framework/scripts/generate_psa_tests.py'
+        if not os.path.isfile(git_worktree_path + '/' + generate_psa_tests):
+            # The checked-out revision is from before generate_psa_tests.py
+            # was moved to the framework submodule. Use the old location.
+            generate_psa_tests = 'tests/scripts/generate_psa_tests.py'
+
         output = subprocess.check_output(
-            ['tests/scripts/generate_psa_tests.py', '--list'],
+            [generate_psa_tests, '--list'],
             cwd=git_worktree_path,
         ).decode('ascii')
         return [line for line in output.split('\n') if line]
@@ -353,8 +359,14 @@
             if 'storage_format' in filename:
                 storage_data_files.add(filename)
                 to_be_generated.add(filename)
+
+        generate_psa_tests = 'framework/scripts/generate_psa_tests.py'
+        if not os.path.isfile(git_worktree_path + '/' + generate_psa_tests):
+            # The checked-out revision is from before generate_psa_tests.py
+            # was moved to the framework submodule. Use the old location.
+            generate_psa_tests = 'tests/scripts/generate_psa_tests.py'
         subprocess.check_call(
-            ['tests/scripts/generate_psa_tests.py'] + sorted(to_be_generated),
+            [generate_psa_tests] + sorted(to_be_generated),
             cwd=git_worktree_path,
         )
         for test_file in sorted(storage_data_files):
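The old-location fallback above now appears twice in `abi_check.py`. If it ever needs to grow, it could be factored into a small helper; a sketch of one possible shape, where `_generate_psa_tests_path` is a hypothetical name and not part of this patch:

```python
# Sketch only: hypothetical helper factoring out the duplicated fallback.
import os

def _generate_psa_tests_path(git_worktree_path: str) -> str:
    """Return the worktree-relative path of generate_psa_tests.py.

    Newer revisions keep the script in the framework submodule; older
    revisions keep it under tests/scripts/.
    """
    new_location = 'framework/scripts/generate_psa_tests.py'
    if os.path.isfile(os.path.join(git_worktree_path, new_location)):
        return new_location
    return 'tests/scripts/generate_psa_tests.py'
```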
diff --git a/scripts/code_style.py b/scripts/code_style.py
index 9e3c751..d3f89d9 100755
--- a/scripts/code_style.py
+++ b/scripts/code_style.py
@@ -75,8 +75,24 @@
     output = subprocess.check_output(["git", "ls-files"] + file_patterns,
                                      universal_newlines=True)
     src_files = output.split()
+
+    # When this script is called from a git hook, some environment variables
+    # are set by default which force all git commands to use the main repository
+    # (i.e. prevent us from performing commands on the framework repo).
+    # Create an environment without these variables for running commands on the
+    # framework repo.
+    framework_env = os.environ.copy()
+    # Get a list of environment vars that git sets
+    git_env_vars = subprocess.check_output(["git", "rev-parse", "--local-env-vars"],
+                                           universal_newlines=True)
+    # Remove the vars from the environment
+    for var in git_env_vars.split():
+        framework_env.pop(var, None)
+
     output = subprocess.check_output(["git", "-C", "framework", "ls-files"]
-                                     + file_patterns, universal_newlines=True)
+                                     + file_patterns,
+                                     universal_newlines=True,
+                                     env=framework_env)
     framework_src_files = output.split()
 
     if since:
@@ -89,7 +105,8 @@
         # ... the framework submodule
         cmd = ["git", "-C", "framework", "log", since + "..HEAD",
                "--name-only", "--pretty=", "--"] + framework_src_files
-        output = subprocess.check_output(cmd, universal_newlines=True)
+        output = subprocess.check_output(cmd, universal_newlines=True,
+                                         env=framework_env)
         committed_changed_files += ["framework/" + s for s in output.split()]
 
         # and also get all files with uncommitted changes in ...
@@ -100,7 +117,8 @@
         # ... the framework submodule
         cmd = ["git", "-C", "framework", "diff", "--name-only", "--"] + \
               framework_src_files
-        output = subprocess.check_output(cmd, universal_newlines=True)
+        output = subprocess.check_output(cmd, universal_newlines=True,
+                                         env=framework_env)
         uncommitted_changed_files += ["framework/" + s for s in output.split()]
 
         src_files = committed_changed_files + uncommitted_changed_files
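The hook-safe environment built above is useful whenever a script launched from a git hook needs to run git commands against a submodule rather than the superproject. A standalone sketch of the same pattern; the helper name is illustrative and not part of this patch:

```python
# Sketch only: build an environment in which git commands are not redirected
# to the superproject by hook-time variables (GIT_DIR, GIT_INDEX_FILE, ...).
import os
import subprocess

def env_without_git_local_vars() -> dict:
    env = os.environ.copy()
    local_vars = subprocess.check_output(
        ["git", "rev-parse", "--local-env-vars"], universal_newlines=True)
    for var in local_vars.split():
        env.pop(var, None)
    return env

# Example: list files tracked in the framework submodule, even from a hook.
# files = subprocess.check_output(["git", "-C", "framework", "ls-files"],
#                                 universal_newlines=True,
#                                 env=env_without_git_local_vars()).split()
```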
diff --git a/scripts/make_generated_files.bat b/scripts/make_generated_files.bat
index 11bcb1a..f04f6b7 100644
--- a/scripts/make_generated_files.bat
+++ b/scripts/make_generated_files.bat
@@ -10,8 +10,8 @@
 python scripts\generate_ssl_debug_helpers.py || exit /b 1
 perl scripts\generate_visualc_files.pl || exit /b 1
 python scripts\generate_psa_constants.py || exit /b 1
-python tests\scripts\generate_bignum_tests.py || exit /b 1
-python tests\scripts\generate_ecp_tests.py || exit /b 1
-python tests\scripts\generate_psa_tests.py || exit /b 1
-python tests\scripts\generate_test_keys.py --output tests\src\test_keys.h || exit /b 1
-python tests\scripts\generate_test_cert_macros.py --output tests\src\test_certs.h || exit /b 1
+python framework\scripts\generate_bignum_tests.py || exit /b 1
+python framework\scripts\generate_ecp_tests.py || exit /b 1
+python framework\scripts\generate_psa_tests.py || exit /b 1
+python framework\scripts\generate_test_keys.py --output tests\src\test_keys.h || exit /b 1
+python framework\scripts\generate_test_cert_macros.py --output tests\src\test_certs.h || exit /b 1
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index ffe3cc8..5bc38b4 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -21,7 +21,7 @@
 execute_process(
     COMMAND
         ${MBEDTLS_PYTHON_EXECUTABLE}
-        ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+        ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
         --list-for-cmake
     WORKING_DIRECTORY
         ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -33,7 +33,7 @@
 execute_process(
     COMMAND
         ${MBEDTLS_PYTHON_EXECUTABLE}
-        ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+        ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
         --list-for-cmake
     WORKING_DIRECTORY
         ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -45,7 +45,7 @@
 execute_process(
     COMMAND
         ${MBEDTLS_PYTHON_EXECUTABLE}
-        ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+        ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
         --list-for-cmake
     WORKING_DIRECTORY
         ${CMAKE_CURRENT_SOURCE_DIR}/..
@@ -81,10 +81,10 @@
             ${CMAKE_CURRENT_SOURCE_DIR}/..
         COMMAND
             ${MBEDTLS_PYTHON_EXECUTABLE}
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
             --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
         DEPENDS
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_bignum_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_bignum_tests.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_common.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_core.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_mod_raw.py
@@ -99,10 +99,10 @@
             ${CMAKE_CURRENT_SOURCE_DIR}/..
         COMMAND
             ${MBEDTLS_PYTHON_EXECUTABLE}
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
             --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
         DEPENDS
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_ecp_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_ecp_tests.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/bignum_common.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/ecp.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/test_case.py
@@ -115,10 +115,10 @@
             ${CMAKE_CURRENT_SOURCE_DIR}/..
         COMMAND
             ${MBEDTLS_PYTHON_EXECUTABLE}
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
             --directory ${CMAKE_CURRENT_BINARY_DIR}/suites
         DEPENDS
-            ${CMAKE_CURRENT_SOURCE_DIR}/../tests/scripts/generate_psa_tests.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_psa_tests.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/crypto_data_tests.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/crypto_knowledge.py
             ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/mbedtls_framework/macro_collector.py
@@ -220,7 +220,7 @@
             test_suite_${data_name}.c
         COMMAND
             ${MBEDTLS_PYTHON_EXECUTABLE}
-            ${CMAKE_CURRENT_SOURCE_DIR}/scripts/generate_test_code.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_test_code.py
             -f ${CMAKE_CURRENT_SOURCE_DIR}/suites/test_suite_${suite_name}.function
             -d ${data_file}
             -t ${CMAKE_CURRENT_SOURCE_DIR}/suites/main_test.function
@@ -229,7 +229,7 @@
             --helpers-file ${CMAKE_CURRENT_SOURCE_DIR}/suites/helpers.function
             -o .
         DEPENDS
-            ${CMAKE_CURRENT_SOURCE_DIR}/scripts/generate_test_code.py
+            ${CMAKE_CURRENT_SOURCE_DIR}/../framework/scripts/generate_test_code.py
             ${CMAKE_CURRENT_SOURCE_DIR}/suites/test_suite_${suite_name}.function
             ${data_file}
             ${CMAKE_CURRENT_SOURCE_DIR}/suites/main_test.function
diff --git a/tests/Makefile b/tests/Makefile
index b7429ac..1d5c768 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -18,25 +18,25 @@
 
 .PHONY: generated_files
 GENERATED_BIGNUM_DATA_FILES := $(patsubst tests/%,%,$(shell \
-	$(PYTHON) scripts/generate_bignum_tests.py --list || \
+	$(PYTHON) ../framework/scripts/generate_bignum_tests.py --list || \
 	echo FAILED \
 ))
 ifeq ($(GENERATED_BIGNUM_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_bignum_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_bignum_tests.py --list" failed)
 endif
 GENERATED_ECP_DATA_FILES := $(patsubst tests/%,%,$(shell \
-	$(PYTHON) scripts/generate_ecp_tests.py --list || \
+	$(PYTHON) ../framework/scripts/generate_ecp_tests.py --list || \
 	echo FAILED \
 ))
 ifeq ($(GENERATED_ECP_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_ecp_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_ecp_tests.py --list" failed)
 endif
 GENERATED_PSA_DATA_FILES := $(patsubst tests/%,%,$(shell \
-	$(PYTHON) scripts/generate_psa_tests.py --list || \
+	$(PYTHON) ../framework/scripts/generate_psa_tests.py --list || \
 	echo FAILED \
 ))
 ifeq ($(GENERATED_PSA_DATA_FILES),FAILED)
-$(error "$(PYTHON) scripts/generate_psa_tests.py --list" failed)
+$(error "$(PYTHON) ../framework/scripts/generate_psa_tests.py --list" failed)
 endif
 GENERATED_FILES := $(GENERATED_PSA_DATA_FILES) $(GENERATED_ECP_DATA_FILES) $(GENERATED_BIGNUM_DATA_FILES)
 generated_files: $(GENERATED_FILES) src/test_keys.h src/test_certs.h
@@ -49,7 +49,7 @@
 # a separate instance of the recipe for each output file.
 .SECONDARY: generated_bignum_test_data generated_ecp_test_data generated_psa_test_data
 $(GENERATED_BIGNUM_DATA_FILES): $(gen_file_dep) generated_bignum_test_data
-generated_bignum_test_data: scripts/generate_bignum_tests.py
+generated_bignum_test_data: ../framework/scripts/generate_bignum_tests.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_common.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_core.py
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/bignum_mod_raw.py
@@ -58,20 +58,20 @@
 generated_bignum_test_data: ../framework/scripts/mbedtls_framework/test_data_generation.py
 generated_bignum_test_data:
 	echo "  Gen   $(GENERATED_BIGNUM_DATA_FILES)"
-	$(PYTHON) scripts/generate_bignum_tests.py
+	$(PYTHON) ../framework/scripts/generate_bignum_tests.py
 
 $(GENERATED_ECP_DATA_FILES): $(gen_file_dep) generated_ecp_test_data
-generated_ecp_test_data: scripts/generate_ecp_tests.py
+generated_ecp_test_data: ../framework/scripts/generate_ecp_tests.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/bignum_common.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/ecp.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/test_case.py
 generated_ecp_test_data: ../framework/scripts/mbedtls_framework/test_data_generation.py
 generated_ecp_test_data:
 	echo "  Gen   $(GENERATED_ECP_DATA_FILES)"
-	$(PYTHON) scripts/generate_ecp_tests.py
+	$(PYTHON) ../framework/scripts/generate_ecp_tests.py
 
 $(GENERATED_PSA_DATA_FILES): $(gen_file_dep) generated_psa_test_data
-generated_psa_test_data: scripts/generate_psa_tests.py
+generated_psa_test_data: ../framework/scripts/generate_psa_tests.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/crypto_data_tests.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/crypto_knowledge.py
 generated_psa_test_data: ../framework/scripts/mbedtls_framework/macro_collector.py
@@ -90,7 +90,7 @@
 generated_psa_test_data: suites/test_suite_psa_crypto_metadata.data
 generated_psa_test_data:
 	echo "  Gen   $(GENERATED_PSA_DATA_FILES) ..."
-	$(PYTHON) scripts/generate_psa_tests.py
+	$(PYTHON) ../framework/scripts/generate_psa_tests.py
 
 # A test application is built for each suites/test_suite_*.data file.
 # Application name is same as .data file's base name and can be
@@ -112,12 +112,12 @@
 
 mbedtls_test: $(MBEDTLS_TEST_OBJS)
 
-src/test_certs.h: scripts/generate_test_cert_macros.py \
-				  $($(PYTHON) scripts/generate_test_cert_macros.py --list-dependencies)
-	$(PYTHON) scripts/generate_test_cert_macros.py --output $@
+src/test_certs.h: ../framework/scripts/generate_test_cert_macros.py \
+				  $($(PYTHON) ../framework/scripts/generate_test_cert_macros.py --list-dependencies)
+	$(PYTHON) ../framework/scripts/generate_test_cert_macros.py --output $@
 
-src/test_keys.h: scripts/generate_test_keys.py
-	$(PYTHON) scripts/generate_test_keys.py --output $@
+src/test_keys.h: ../framework/scripts/generate_test_keys.py
+	$(PYTHON) ../framework/scripts/generate_test_keys.py --output $@
 
 TEST_OBJS_DEPS = $(wildcard include/test/*.h include/test/*/*.h)
 ifdef RECORD_PSA_STATUS_COVERAGE_LOG
@@ -159,9 +159,9 @@
 # dot in .c file's base name.
 #
 .SECONDEXPANSION:
-%.c: suites/$$(firstword $$(subst ., ,$$*)).function suites/%.data scripts/generate_test_code.py suites/helpers.function suites/main_test.function suites/host_test.function
+%.c: suites/$$(firstword $$(subst ., ,$$*)).function suites/%.data ../framework/scripts/generate_test_code.py suites/helpers.function suites/main_test.function suites/host_test.function
 	echo "  Gen   $@"
-	$(PYTHON) scripts/generate_test_code.py -f suites/$(firstword $(subst ., ,$*)).function \
+	$(PYTHON) ../framework/scripts/generate_test_code.py -f suites/$(firstword $(subst ., ,$*)).function \
 		-d suites/$*.data \
 		-t suites/main_test.function \
 		-p suites/host_test.function \
diff --git a/tests/data_files/test_certs.h.jinja2 b/tests/data_files/test_certs.h.jinja2
index f2657d8..c420c79 100644
--- a/tests/data_files/test_certs.h.jinja2
+++ b/tests/data_files/test_certs.h.jinja2
@@ -5,7 +5,7 @@
  *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
  */
 
-/* THIS FILE is generated by `tests/scripts/generate_test_cert_macros.py` */
+/* THIS FILE is generated by `framework/scripts/generate_test_cert_macros.py` */
 /* *INDENT-OFF* */
 
 {% for mode, name, value in macros %}
diff --git a/tests/scripts/all.sh b/tests/scripts/all.sh
index c0abf05..0bd6d18 100755
--- a/tests/scripts/all.sh
+++ b/tests/scripts/all.sh
@@ -1903,43 +1903,26 @@
 }
 
 component_test_tls1_2_default_stream_cipher_only () {
-    msg "build: default with only stream cipher"
-
-    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C
-    scripts/config.py unset MBEDTLS_GCM_C
-    scripts/config.py unset MBEDTLS_CCM_C
-    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
-    #Disable TLS 1.3 (as no AEAD)
-    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
-    # Disable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
-    scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
-    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
-    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
-    # Enable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
-    scripts/config.py set MBEDTLS_CIPHER_NULL_CIPHER
-    # Modules that depend on AEAD
-    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
-    scripts/config.py unset MBEDTLS_SSL_TICKET_C
-
-    make
-
-    msg "test: default with only stream cipher"
-    make test
-
-    # Not running ssl-opt.sh because most tests require a non-NULL ciphersuite.
-}
-
-component_test_tls1_2_default_stream_cipher_only_use_psa () {
     msg "build: default with only stream cipher use psa"
 
     scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
     # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_GCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CHACHA20_POLY1305
+    # Note: The three unsets below are to be removed for Mbed TLS 4.0
     scripts/config.py unset MBEDTLS_GCM_C
     scripts/config.py unset MBEDTLS_CCM_C
     scripts/config.py unset MBEDTLS_CHACHAPOLY_C
     #Disable TLS 1.3 (as no AEAD)
     scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Disable CBC. Note: When implemented, PSA_WANT_ALG_CBC_MAC will also need to be unset here to fully disable CBC
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CBC_NO_PADDING
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CBC_PKCS7
     # Disable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    # Note: The unset below is to be removed for 4.0
     scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
     # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
     scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
@@ -1958,45 +1941,23 @@
 }
 
 component_test_tls1_2_default_cbc_legacy_cipher_only () {
-    msg "build: default with only CBC-legacy cipher"
-
-    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
-    scripts/config.py unset MBEDTLS_GCM_C
-    scripts/config.py unset MBEDTLS_CCM_C
-    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
-    #Disable TLS 1.3 (as no AEAD)
-    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
-    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
-    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
-    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
-    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
-    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
-    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
-    # Modules that depend on AEAD
-    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
-    scripts/config.py unset MBEDTLS_SSL_TICKET_C
-
-    make
-
-    msg "test: default with only CBC-legacy cipher"
-    make test
-
-    msg "test: default with only CBC-legacy cipher - ssl-opt.sh (subset)"
-    tests/ssl-opt.sh -f "TLS 1.2"
-}
-
-component_test_tls1_2_deafult_cbc_legacy_cipher_only_use_psa () {
     msg "build: default with only CBC-legacy cipher use psa"
 
     scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
     # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_GCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CHACHA20_POLY1305
+    # Note: The three unsets below are to be removed for Mbed TLS 4.0
     scripts/config.py unset MBEDTLS_GCM_C
     scripts/config.py unset MBEDTLS_CCM_C
     scripts/config.py unset MBEDTLS_CHACHAPOLY_C
     #Disable TLS 1.3 (as no AEAD)
     scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
     # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
-    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    scripts/config.py -f $CRYPTO_CONFIG_H set PSA_WANT_ALG_CBC_NO_PADDING
     # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
     scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
     # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
@@ -2015,45 +1976,23 @@
 }
 
 component_test_tls1_2_default_cbc_legacy_cbc_etm_cipher_only () {
-    msg "build: default with only CBC-legacy and CBC-EtM ciphers"
-
-    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
-    scripts/config.py unset MBEDTLS_GCM_C
-    scripts/config.py unset MBEDTLS_CCM_C
-    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
-    #Disable TLS 1.3 (as no AEAD)
-    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
-    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
-    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
-    # Enable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
-    scripts/config.py set MBEDTLS_SSL_ENCRYPT_THEN_MAC
-    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
-    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
-    # Modules that depend on AEAD
-    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
-    scripts/config.py unset MBEDTLS_SSL_TICKET_C
-
-    make
-
-    msg "test: default with only CBC-legacy and CBC-EtM ciphers"
-    make test
-
-    msg "test: default with only CBC-legacy and CBC-EtM ciphers - ssl-opt.sh (subset)"
-    tests/ssl-opt.sh -f "TLS 1.2"
-}
-
-component_test_tls1_2_default_cbc_legacy_cbc_etm_cipher_only_use_psa () {
     msg "build: default with only CBC-legacy and CBC-EtM ciphers use psa"
 
     scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
     # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_GCM
+    scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CHACHA20_POLY1305
+    # Note: The three unsets below are to be removed for Mbed TLS 4.0
     scripts/config.py unset MBEDTLS_GCM_C
     scripts/config.py unset MBEDTLS_CCM_C
     scripts/config.py unset MBEDTLS_CHACHAPOLY_C
     #Disable TLS 1.3 (as no AEAD)
     scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
     # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
-    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    scripts/config.py -f $CRYPTO_CONFIG_H set PSA_WANT_ALG_CBC_NO_PADDING
     # Enable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
     scripts/config.py set MBEDTLS_SSL_ENCRYPT_THEN_MAC
     # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
@@ -6205,7 +6144,7 @@
     # unittest writes out mundane stuff like number or tests run on stderr.
     # Our convention is to reserve stderr for actual errors, and write
     # harmless info on stdout so it can be suppress with --quiet.
-    ./tests/scripts/test_generate_test_code.py 2>&1
+    ./framework/scripts/test_generate_test_code.py 2>&1
 
     msg "unit test: translate_ciphers.py"
     python3 -m unittest tests/scripts/translate_ciphers.py 2>&1
diff --git a/tests/scripts/check-generated-files.sh b/tests/scripts/check-generated-files.sh
index 049721b..e740f33 100755
--- a/tests/scripts/check-generated-files.sh
+++ b/tests/scripts/check-generated-files.sh
@@ -128,10 +128,10 @@
 
 # These checks are common to Mbed TLS and TF-PSA-Crypto
 check scripts/generate_psa_constants.py programs/psa/psa_constant_names_generated.c
-check tests/scripts/generate_bignum_tests.py $(tests/scripts/generate_bignum_tests.py --list)
-check tests/scripts/generate_ecp_tests.py $(tests/scripts/generate_ecp_tests.py --list)
-check tests/scripts/generate_psa_tests.py $(tests/scripts/generate_psa_tests.py --list)
-check tests/scripts/generate_test_keys.py tests/src/test_keys.h
+check framework/scripts/generate_bignum_tests.py $(framework/scripts/generate_bignum_tests.py --list)
+check framework/scripts/generate_ecp_tests.py $(framework/scripts/generate_ecp_tests.py --list)
+check framework/scripts/generate_psa_tests.py $(framework/scripts/generate_psa_tests.py --list)
+check framework/scripts/generate_test_keys.py tests/src/test_keys.h
 check scripts/generate_driver_wrappers.py $library_dir/psa_crypto_driver_wrappers.h $library_dir/psa_crypto_driver_wrappers_no_static.c
 
 # Additional checks for Mbed TLS only
@@ -140,7 +140,7 @@
     check scripts/generate_query_config.pl programs/test/query_config.c
     check scripts/generate_features.pl library/version_features.c
     check scripts/generate_ssl_debug_helpers.py library/ssl_debug_helpers_generated.c
-    check tests/scripts/generate_test_cert_macros.py tests/src/test_certs.h
+    check framework/scripts/generate_test_cert_macros.py tests/src/test_certs.h
     # generate_visualc_files enumerates source files (library/*.c). It doesn't
     # care about their content, but the files must exist. So it must run after
     # the step that creates or updates these files.
@@ -150,4 +150,4 @@
 # Generated files that are present in the repository even in the development
 # branch. (This is intended to be temporary, until the generator scripts are
 # fully reviewed and the build scripts support a generated header file.)
-check tests/scripts/generate_psa_wrappers.py tests/include/test/psa_test_wrappers.h tests/src/psa_test_wrappers.c
+check framework/scripts/generate_psa_wrappers.py tests/include/test/psa_test_wrappers.h tests/src/psa_test_wrappers.c
diff --git a/tests/scripts/check-python-files.sh b/tests/scripts/check-python-files.sh
index 32b5baf..77102ba 100755
--- a/tests/scripts/check-python-files.sh
+++ b/tests/scripts/check-python-files.sh
@@ -55,14 +55,14 @@
 fi
 
 echo 'Running pylint ...'
-$PYTHON -m pylint framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py || {
+$PYTHON -m pylint framework/scripts/*.py framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py || {
     echo >&2 "pylint reported errors"
     ret=1
 }
 
 echo
 echo 'Running mypy ...'
-$PYTHON -m mypy framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py ||
+$PYTHON -m mypy framework/scripts/*.py framework/scripts/mbedtls_framework/*.py scripts/*.py tests/scripts/*.py ||
   ret=1
 
 exit $ret
diff --git a/tests/scripts/generate_bignum_tests.py b/tests/scripts/generate_bignum_tests.py
deleted file mode 100755
index b855e91..0000000
--- a/tests/scripts/generate_bignum_tests.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python3
-"""Generate test data for bignum functions.
-
-With no arguments, generate all test data. With non-option arguments,
-generate only the specified files.
-
-Class structure:
-
-Child classes of test_data_generation.BaseTarget (file targets) represent an output
-file. These indicate where test cases will be written to, for all subclasses of
-this target. Multiple file targets should not reuse a `target_basename`.
-
-Each subclass derived from a file target can either be:
-  - A concrete class, representing a test function, which generates test cases.
-  - An abstract class containing shared methods and attributes, not associated
-        with a test function. An example is BignumOperation, which provides
-        common features used for bignum binary operations.
-
-Both concrete and abstract subclasses can be derived from, to implement
-additional test cases (see BignumCmp and BignumCmpAbs for examples of deriving
-from abstract and concrete classes).
-
-
-Adding test case generation for a function:
-
-A subclass representing the test function should be added, deriving from a
-file target such as BignumTarget. This test class must set/implement the
-following:
-  - test_function: the function name from the associated .function file.
-  - test_name: a descriptive name or brief summary to refer to the test
-        function.
-  - arguments(): a method to generate the list of arguments required for the
-        test_function.
-  - generate_function_tests(): a method to generate TestCases for the function.
-        This should create instances of the class with required input data, and
-        call `.create_test_case()` to yield the TestCase.
-
-Additional details and other attributes/methods are given in the documentation
-of BaseTarget in test_data_generation.py.
-"""
-
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-import sys
-
-from abc import ABCMeta
-from typing import List
-
-import scripts_path # pylint: disable=unused-import
-from mbedtls_framework import test_data_generation
-from mbedtls_framework import bignum_common
-# Import modules containing additional test classes
-# Test function classes in these modules will be registered by
-# the framework
-from mbedtls_framework import bignum_core, bignum_mod_raw, bignum_mod # pylint: disable=unused-import
-
-class BignumTarget(test_data_generation.BaseTarget):
-    #pylint: disable=too-few-public-methods
-    """Target for bignum (legacy) test case generation."""
-    target_basename = 'test_suite_bignum.generated'
-
-
-class BignumOperation(bignum_common.OperationCommon, BignumTarget,
-                      metaclass=ABCMeta):
-    #pylint: disable=abstract-method
-    """Common features for bignum operations in legacy tests."""
-    unique_combinations_only = True
-    input_values = [
-        "", "0", "-", "-0",
-        "7b", "-7b",
-        "0000000000000000123", "-0000000000000000123",
-        "1230000000000000000", "-1230000000000000000"
-    ]
-
-    def description_suffix(self) -> str:
-        #pylint: disable=no-self-use # derived classes need self
-        """Text to add at the end of the test case description."""
-        return ""
-
-    def description(self) -> str:
-        """Generate a description for the test case.
-
-        If not set, case_description uses the form A `symbol` B, where symbol
-        is used to represent the operation. Descriptions of each value are
-        generated to provide some context to the test case.
-        """
-        if not self.case_description:
-            self.case_description = "{} {} {}".format(
-                self.value_description(self.arg_a),
-                self.symbol,
-                self.value_description(self.arg_b)
-            )
-            description_suffix = self.description_suffix()
-            if description_suffix:
-                self.case_description += " " + description_suffix
-        return super().description()
-
-    @staticmethod
-    def value_description(val) -> str:
-        """Generate a description of the argument val.
-
-        This produces a simple description of the value, which is used in test
-        case naming to add context.
-        """
-        if val == "":
-            return "0 (null)"
-        if val == "-":
-            return "negative 0 (null)"
-        if val == "0":
-            return "0 (1 limb)"
-
-        if val[0] == "-":
-            tmp = "negative"
-            val = val[1:]
-        else:
-            tmp = "positive"
-        if val[0] == "0":
-            tmp += " with leading zero limb"
-        elif len(val) > 10:
-            tmp = "large " + tmp
-        return tmp
-
-
-class BignumCmp(BignumOperation):
-    """Test cases for bignum value comparison."""
-    count = 0
-    test_function = "mpi_cmp_mpi"
-    test_name = "MPI compare"
-    input_cases = [
-        ("-2", "-3"),
-        ("-2", "-2"),
-        ("2b4", "2b5"),
-        ("2b5", "2b6")
-        ]
-
-    def __init__(self, val_a, val_b) -> None:
-        super().__init__(val_a, val_b)
-        self._result = int(self.int_a > self.int_b) - int(self.int_a < self.int_b)
-        self.symbol = ["<", "==", ">"][self._result + 1]
-
-    def result(self) -> List[str]:
-        return [str(self._result)]
-
-
-class BignumCmpAbs(BignumCmp):
-    """Test cases for absolute bignum value comparison."""
-    count = 0
-    test_function = "mpi_cmp_abs"
-    test_name = "MPI compare (abs)"
-
-    def __init__(self, val_a, val_b) -> None:
-        super().__init__(val_a.strip("-"), val_b.strip("-"))
-
-
-class BignumAdd(BignumOperation):
-    """Test cases for bignum value addition."""
-    count = 0
-    symbol = "+"
-    test_function = "mpi_add_mpi"
-    test_name = "MPI add"
-    input_cases = bignum_common.combination_pairs(
-        [
-            "1c67967269c6", "9cde3",
-            "-1c67967269c6", "-9cde3",
-        ]
-    )
-
-    def __init__(self, val_a: str, val_b: str) -> None:
-        super().__init__(val_a, val_b)
-        self._result = self.int_a + self.int_b
-
-    def description_suffix(self) -> str:
-        if (self.int_a >= 0 and self.int_b >= 0):
-            return "" # obviously positive result or 0
-        if (self.int_a <= 0 and self.int_b <= 0):
-            return "" # obviously negative result or 0
-        # The sign of the result is not obvious, so indicate it
-        return ", result{}0".format('>' if self._result > 0 else
-                                    '<' if self._result < 0 else '=')
-
-    def result(self) -> List[str]:
-        return [bignum_common.quote_str("{:x}".format(self._result))]
-
-if __name__ == '__main__':
-    # Use the section of the docstring relevant to the CLI as description
-    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
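The file deleted above lives on unchanged in the framework submodule, and the recipe in its docstring for adding a test function still applies there. A compressed illustration of that recipe, modeled on the BignumCmp and BignumAdd classes above; the class name and test_function value are made up for illustration, and the snippet assumes the surrounding module context (BignumOperation, bignum_common) of the moved script:

```python
# Sketch only: a hypothetical concrete test class following the documented
# pattern; it would sit next to BignumCmp in the framework copy of the script.
from typing import List

class BignumHypotheticalSub(BignumOperation):
    """Test cases for a hypothetical bignum subtraction function."""
    count = 0
    symbol = "-"
    test_function = "mpi_hypothetical_sub"  # made-up .function entry name
    test_name = "MPI hypothetical sub"
    input_cases = [("2b4", "7b"), ("-2b4", "7b")]

    def __init__(self, val_a: str, val_b: str) -> None:
        super().__init__(val_a, val_b)
        self._result = self.int_a - self.int_b

    def result(self) -> List[str]:
        # Expected value, hex-quoted as BignumAdd does above.
        return [bignum_common.quote_str("{:x}".format(self._result))]
```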
diff --git a/tests/scripts/generate_ecp_tests.py b/tests/scripts/generate_ecp_tests.py
deleted file mode 100755
index c5281ad..0000000
--- a/tests/scripts/generate_ecp_tests.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python3
-"""Generate test data for ecp functions.
-
-The command line usage, class structure and available methods are the same
-as in generate_bignum_tests.py.
-"""
-
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-import sys
-
-import scripts_path # pylint: disable=unused-import
-from mbedtls_framework import test_data_generation
-# Import modules containing additional test classes
-# Test function classes in these modules will be registered by
-# the framework
-from mbedtls_framework import ecp # pylint: disable=unused-import
-
-if __name__ == '__main__':
-    # Use the section of the docstring relevant to the CLI as description
-    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
diff --git a/tests/scripts/generate_pkcs7_tests.py b/tests/scripts/generate_pkcs7_tests.py
deleted file mode 100755
index 0e484b0..0000000
--- a/tests/scripts/generate_pkcs7_tests.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-#
-#  Copyright The Mbed TLS Contributors
-#  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-#
-
-"""
-Make fuzz like testing for pkcs7 tests
-Given a valid DER pkcs7 file add tests to the test_suite_pkcs7.data file
- - It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
-    function is defined in test_suite_pkcs7.function
- - This is not meant to be portable code, if anything it is meant to serve as
-   documentation for showing how those ugly tests in test_suite_pkcs7.data were created
-"""
-
-
-import sys
-from os.path import exists
-
-PKCS7_TEST_FILE = "../suites/test_suite_pkcs7.data"
-
-class Test: # pylint: disable=too-few-public-methods
-    """
-    A instance of a test in test_suite_pkcs7.data
-    """
-    def __init__(self, name, depends, func_call):
-        self.name = name
-        self.depends = depends
-        self.func_call = func_call
-
-    # pylint: disable=no-self-use
-    def to_string(self):
-        return "\n" + self.name + "\n" + self.depends + "\n" + self.func_call + "\n"
-
-class TestData:
-    """
-    Take in test_suite_pkcs7.data file.
-    Allow for new tests to be added.
-    """
-    mandatory_dep = "MBEDTLS_MD_CAN_SHA256"
-    test_name = "PKCS7 Parse Failure Invalid ASN1"
-    test_function = "pkcs7_asn1_fail:"
-    def __init__(self, file_name):
-        self.file_name = file_name
-        self.last_test_num, self.old_tests = self.read_test_file(file_name)
-        self.new_tests = []
-
-    # pylint: disable=no-self-use
-    def read_test_file(self, file):
-        """
-        Parse the test_suite_pkcs7.data file.
-        """
-        tests = []
-        if not exists(file):
-            print(file + " Does not exist")
-            sys.exit()
-        with open(file, "r", encoding='UTF-8') as fp:
-            data = fp.read()
-        lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
-        i = 0
-        while i < len(lines):
-            if "depends" in lines[i+1]:
-                tests.append(Test(lines[i], lines[i+1], lines[i+2]))
-                i += 3
-            else:
-                tests.append(Test(lines[i], None, lines[i+1]))
-                i += 2
-        latest_test_num = float(tests[-1].name.split('#')[1])
-        return latest_test_num, tests
-
-    def add(self, name, func_call):
-        self.last_test_num += 1
-        self.new_tests.append(Test(self.test_name + ": " + name +  " #" + \
-                str(self.last_test_num), "depends_on:" + self.mandatory_dep, \
-                self.test_function + '"' + func_call + '"'))
-
-    def write_changes(self):
-        with open(self.file_name, 'a', encoding='UTF-8') as fw:
-            fw.write("\n")
-            for t in self.new_tests:
-                fw.write(t.to_string())
-
-
-def asn1_mutate(data):
-    """
-    We have been given an asn1 structure representing a pkcs7.
-    We want to return an array of slightly modified versions of this data
-    they should be modified in a way which makes the structure invalid
-
-    We know that asn1 structures are:
-    |---1 byte showing data type---|----byte(s) for length of data---|---data content--|
-    We know that some data types can contain other data types.
-    Return a dictionary of reasons and mutated data types.
-    """
-
-    # off the bat just add bytes to start and end of the buffer
-    mutations = []
-    reasons = []
-    mutations.append(["00"] + data)
-    reasons.append("Add null byte to start")
-    mutations.append(data + ["00"])
-    reasons.append("Add null byte to end")
-    # for every asn1 entry we should attempt to:
-    #    - change the data type tag
-    #    - make the length longer than actual
-    #    - make the length shorter than actual
-    i = 0
-    while i < len(data):
-        tag_i = i
-        leng_i = tag_i + 1
-        data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
-        if data[leng_i][0] == '8':
-            length = int(''.join(data[leng_i + 1: data_i]), 16)
-        else:
-            length = int(data[leng_i], 16)
-
-        tag = data[tag_i]
-        print("Looking at ans1: offset " + str(i) + " tag = " + tag + \
-                ", length = " + str(length)+ ":")
-        print(''.join(data[data_i:data_i+length]))
-        # change tag to something else
-        if tag == "02":
-            # turn integers into octet strings
-            new_tag = "04"
-        else:
-            # turn everything else into an integer
-            new_tag = "02"
-        mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
-        reasons.append("Change tag " + tag + " to " + new_tag)
-
-        # change lengths to too big
-        # skip any edge cases which would cause carry over
-        if int(data[data_i - 1], 16) < 255:
-            new_length = str(hex(int(data[data_i - 1], 16) + 1))[2:]
-            if len(new_length) == 1:
-                new_length = "0"+new_length
-            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
-            reasons.append("Change length from " + str(length) + " to " \
-                    + str(length + 1))
-            # we can add another test here for tags that contain other tags \
-            # where they have more data than there containing tags account for
-            if tag in ["30", "a0", "31"]:
-                mutations.append(data[:data_i -1] + [new_length] + \
-                        data[data_i:data_i + length] + ["00"] + \
-                        data[data_i + length:])
-                reasons.append("Change contents of tag " + tag + " to contain \
-                        one unaccounted extra byte")
-        # change lengths to too small
-        if int(data[data_i - 1], 16) > 0:
-            new_length = str(hex(int(data[data_i - 1], 16) - 1))[2:]
-            if len(new_length) == 1:
-                new_length = "0"+new_length
-            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
-            reasons.append("Change length from " + str(length) + " to " + str(length - 1))
-
-        # some tag types contain other tag types so we should iterate into the data
-        if tag in ["30", "a0", "31"]:
-            i = data_i
-        else:
-            i = data_i + length
-
-    return list(zip(reasons, mutations))
-
-if __name__ == "__main__":
-    if len(sys.argv) < 2:
-        print("USAGE: " + sys.argv[0] + " <pkcs7_der_file>")
-        sys.exit()
-
-    DATA_FILE = sys.argv[1]
-    TEST_DATA = TestData(PKCS7_TEST_FILE)
-    with open(DATA_FILE, 'rb') as f:
-        DATA_STR = f.read().hex()
-    # make data an array of byte strings eg ['de','ad','be','ef']
-    HEX_DATA = list(map(''.join, [[DATA_STR[i], DATA_STR[i+1]] for i in range(0, len(DATA_STR), \
-            2)]))
-    # returns tuples of test_names and modified data buffers
-    MUT_ARR = asn1_mutate(HEX_DATA)
-
-    print("made " + str(len(MUT_ARR)) + " new tests")
-    for new_test in MUT_ARR:
-        TEST_DATA.add(new_test[0], ''.join(new_test[1]))
-
-    TEST_DATA.write_changes()
diff --git a/tests/scripts/generate_psa_tests.py b/tests/scripts/generate_psa_tests.py
deleted file mode 100755
index 75d02b9..0000000
--- a/tests/scripts/generate_psa_tests.py
+++ /dev/null
@@ -1,850 +0,0 @@
-#!/usr/bin/env python3
-"""Generate test data for PSA cryptographic mechanisms.
-
-With no arguments, generate all test data. With non-option arguments,
-generate only the specified files.
-"""
-
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-import enum
-import re
-import sys
-from typing import Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional
-
-import scripts_path # pylint: disable=unused-import
-from mbedtls_framework import crypto_data_tests
-from mbedtls_framework import crypto_knowledge
-from mbedtls_framework import macro_collector #pylint: disable=unused-import
-from mbedtls_framework import psa_information
-from mbedtls_framework import psa_storage
-from mbedtls_framework import test_case
-from mbedtls_framework import test_data_generation
-
-
-
-def test_case_for_key_type_not_supported(
-        verb: str, key_type: str, bits: int,
-        dependencies: List[str],
-        *args: str,
-        param_descr: str = ''
-) -> test_case.TestCase:
-    """Return one test case exercising a key creation method
-    for an unsupported key type or size.
-    """
-    psa_information.hack_dependencies_not_implemented(dependencies)
-    tc = test_case.TestCase()
-    short_key_type = crypto_knowledge.short_expression(key_type)
-    adverb = 'not' if dependencies else 'never'
-    if param_descr:
-        adverb = param_descr + ' ' + adverb
-    tc.set_description('PSA {} {} {}-bit {} supported'
-                       .format(verb, short_key_type, bits, adverb))
-    tc.set_dependencies(dependencies)
-    tc.set_function(verb + '_not_supported')
-    tc.set_arguments([key_type] + list(args))
-    return tc
-
-class KeyTypeNotSupported:
-    """Generate test cases for when a key type is not supported."""
-
-    def __init__(self, info: psa_information.Information) -> None:
-        self.constructors = info.constructors
-
-    ALWAYS_SUPPORTED = frozenset([
-        'PSA_KEY_TYPE_DERIVE',
-        'PSA_KEY_TYPE_PASSWORD',
-        'PSA_KEY_TYPE_PASSWORD_HASH',
-        'PSA_KEY_TYPE_RAW_DATA',
-        'PSA_KEY_TYPE_HMAC'
-    ])
-    def test_cases_for_key_type_not_supported(
-            self,
-            kt: crypto_knowledge.KeyType,
-            param: Optional[int] = None,
-            param_descr: str = '',
-    ) -> Iterator[test_case.TestCase]:
-        """Return test cases exercising key creation when the given type is unsupported.
-
-        If param is present and not None, emit test cases conditioned on this
-        parameter not being supported. If it is absent or None, emit test cases
-        conditioned on the base type not being supported.
-        """
-        if kt.name in self.ALWAYS_SUPPORTED:
-            # Don't generate test cases for key types that are always supported.
-            # They would be skipped in all configurations, which is noise.
-            return
-        import_dependencies = [('!' if param is None else '') +
-                               psa_information.psa_want_symbol(kt.name)]
-        if kt.params is not None:
-            import_dependencies += [('!' if param == i else '') +
-                                    psa_information.psa_want_symbol(sym)
-                                    for i, sym in enumerate(kt.params)]
-        if kt.name.endswith('_PUBLIC_KEY'):
-            generate_dependencies = []
-        else:
-            generate_dependencies = \
-                psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
-            import_dependencies = \
-                psa_information.fix_key_pair_dependencies(import_dependencies, 'BASIC')
-        for bits in kt.sizes_to_test():
-            yield test_case_for_key_type_not_supported(
-                'import', kt.expression, bits,
-                psa_information.finish_family_dependencies(import_dependencies, bits),
-                test_case.hex_string(kt.key_material(bits)),
-                param_descr=param_descr,
-            )
-            if not generate_dependencies and param is not None:
-                # If generation is impossible for this key type, rather than
-                # supported or not depending on implementation capabilities,
-                # only generate the test case once.
-                continue
-                # For public key we expect that key generation fails with
-                # INVALID_ARGUMENT. It is handled by KeyGenerate class.
-            if not kt.is_public():
-                yield test_case_for_key_type_not_supported(
-                    'generate', kt.expression, bits,
-                    psa_information.finish_family_dependencies(generate_dependencies, bits),
-                    str(bits),
-                    param_descr=param_descr,
-                )
-            # To be added: derive
-
-    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
-                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
-    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
-                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')
-
-    def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
-        """Generate test cases that exercise the creation of keys of unsupported types."""
-        for key_type in sorted(self.constructors.key_types):
-            if key_type in self.ECC_KEY_TYPES:
-                continue
-            if key_type in self.DH_KEY_TYPES:
-                continue
-            kt = crypto_knowledge.KeyType(key_type)
-            yield from self.test_cases_for_key_type_not_supported(kt)
-        for curve_family in sorted(self.constructors.ecc_curves):
-            for constr in self.ECC_KEY_TYPES:
-                kt = crypto_knowledge.KeyType(constr, [curve_family])
-                yield from self.test_cases_for_key_type_not_supported(
-                    kt, param_descr='type')
-                yield from self.test_cases_for_key_type_not_supported(
-                    kt, 0, param_descr='curve')
-        for dh_family in sorted(self.constructors.dh_groups):
-            for constr in self.DH_KEY_TYPES:
-                kt = crypto_knowledge.KeyType(constr, [dh_family])
-                yield from self.test_cases_for_key_type_not_supported(
-                    kt, param_descr='type')
-                yield from self.test_cases_for_key_type_not_supported(
-                    kt, 0, param_descr='group')
-
-def test_case_for_key_generation(
-        key_type: str, bits: int,
-        dependencies: List[str],
-        *args: str,
-        result: str = ''
-) -> test_case.TestCase:
-    """Return one test case exercising a key generation.
-    """
-    psa_information.hack_dependencies_not_implemented(dependencies)
-    tc = test_case.TestCase()
-    short_key_type = crypto_knowledge.short_expression(key_type)
-    tc.set_description('PSA {} {}-bit'
-                       .format(short_key_type, bits))
-    tc.set_dependencies(dependencies)
-    tc.set_function('generate_key')
-    tc.set_arguments([key_type] + list(args) + [result])
-
-    return tc
-
-class KeyGenerate:
-    """Generate positive and negative (invalid argument) test cases for key generation."""
-
-    def __init__(self, info: psa_information.Information) -> None:
-        self.constructors = info.constructors
-
-    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
-                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
-    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
-                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')
-
-    @staticmethod
-    def test_cases_for_key_type_key_generation(
-            kt: crypto_knowledge.KeyType
-    ) -> Iterator[test_case.TestCase]:
-        """Return test cases exercising key generation.
-
-        All key types can be generated except for public keys. For public key
-        PSA_ERROR_INVALID_ARGUMENT status is expected.
-        """
-        result = 'PSA_SUCCESS'
-
-        import_dependencies = [psa_information.psa_want_symbol(kt.name)]
-        if kt.params is not None:
-            import_dependencies += [psa_information.psa_want_symbol(sym)
-                                    for i, sym in enumerate(kt.params)]
-        if kt.name.endswith('_PUBLIC_KEY'):
-            # The library checks whether the key type is a public key generically,
-            # before it reaches a point where it needs support for the specific key
-            # type, so it returns INVALID_ARGUMENT for unsupported public key types.
-            generate_dependencies = []
-            result = 'PSA_ERROR_INVALID_ARGUMENT'
-        else:
-            generate_dependencies = \
-                psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
-        for bits in kt.sizes_to_test():
-            if kt.name == 'PSA_KEY_TYPE_RSA_KEY_PAIR':
-                size_dependency = "PSA_VENDOR_RSA_GENERATE_MIN_KEY_BITS <= " +  str(bits)
-                test_dependencies = generate_dependencies + [size_dependency]
-            else:
-                test_dependencies = generate_dependencies
-            yield test_case_for_key_generation(
-                kt.expression, bits,
-                psa_information.finish_family_dependencies(test_dependencies, bits),
-                str(bits),
-                result
-            )
-
-    def test_cases_for_key_generation(self) -> Iterator[test_case.TestCase]:
-        """Generate test cases that exercise the generation of keys."""
-        for key_type in sorted(self.constructors.key_types):
-            if key_type in self.ECC_KEY_TYPES:
-                continue
-            if key_type in self.DH_KEY_TYPES:
-                continue
-            kt = crypto_knowledge.KeyType(key_type)
-            yield from self.test_cases_for_key_type_key_generation(kt)
-        for curve_family in sorted(self.constructors.ecc_curves):
-            for constr in self.ECC_KEY_TYPES:
-                kt = crypto_knowledge.KeyType(constr, [curve_family])
-                yield from self.test_cases_for_key_type_key_generation(kt)
-        for dh_family in sorted(self.constructors.dh_groups):
-            for constr in self.DH_KEY_TYPES:
-                kt = crypto_knowledge.KeyType(constr, [dh_family])
-                yield from self.test_cases_for_key_type_key_generation(kt)
-
-class OpFail:
-    """Generate test cases for operations that must fail."""
-    #pylint: disable=too-few-public-methods
-
-    class Reason(enum.Enum):
-        NOT_SUPPORTED = 0
-        INVALID = 1
-        INCOMPATIBLE = 2
-        PUBLIC = 3
-
-    def __init__(self, info: psa_information.Information) -> None:
-        self.constructors = info.constructors
-        key_type_expressions = self.constructors.generate_expressions(
-            sorted(self.constructors.key_types)
-        )
-        self.key_types = [crypto_knowledge.KeyType(kt_expr)
-                          for kt_expr in key_type_expressions]
-
-    def make_test_case(
-            self,
-            alg: crypto_knowledge.Algorithm,
-            category: crypto_knowledge.AlgorithmCategory,
-            reason: 'Reason',
-            kt: Optional[crypto_knowledge.KeyType] = None,
-            not_deps: FrozenSet[str] = frozenset(),
-    ) -> test_case.TestCase:
-        """Construct a failure test case for a one-key or keyless operation."""
-        #pylint: disable=too-many-arguments,too-many-locals
-        tc = test_case.TestCase()
-        pretty_alg = alg.short_expression()
-        if reason == self.Reason.NOT_SUPPORTED:
-            short_deps = [re.sub(r'PSA_WANT_ALG_', r'', dep)
-                          for dep in not_deps]
-            pretty_reason = '!' + '&'.join(sorted(short_deps))
-        else:
-            pretty_reason = reason.name.lower()
-        if kt:
-            key_type = kt.expression
-            pretty_type = kt.short_expression()
-        else:
-            key_type = ''
-            pretty_type = ''
-        tc.set_description('PSA {} {}: {}{}'
-                           .format(category.name.lower(),
-                                   pretty_alg,
-                                   pretty_reason,
-                                   ' with ' + pretty_type if pretty_type else ''))
-        dependencies = psa_information.automatic_dependencies(alg.base_expression, key_type)
-        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
-        for i, dep in enumerate(dependencies):
-            if dep in not_deps:
-                dependencies[i] = '!' + dep
-        tc.set_dependencies(dependencies)
-        tc.set_function(category.name.lower() + '_fail')
-        arguments = [] # type: List[str]
-        if kt:
-            key_material = kt.key_material(kt.sizes_to_test()[0])
-            arguments += [key_type, test_case.hex_string(key_material)]
-        arguments.append(alg.expression)
-        if category.is_asymmetric():
-            arguments.append('1' if reason == self.Reason.PUBLIC else '0')
-        error = ('NOT_SUPPORTED' if reason == self.Reason.NOT_SUPPORTED else
-                 'INVALID_ARGUMENT')
-        arguments.append('PSA_ERROR_' + error)
-        tc.set_arguments(arguments)
-        return tc
-
-    def no_key_test_cases(
-            self,
-            alg: crypto_knowledge.Algorithm,
-            category: crypto_knowledge.AlgorithmCategory,
-    ) -> Iterator[test_case.TestCase]:
-        """Generate failure test cases for keyless operations with the specified algorithm."""
-        if alg.can_do(category):
-            # Compatible operation, unsupported algorithm
-            for dep in psa_information.automatic_dependencies(alg.base_expression):
-                yield self.make_test_case(alg, category,
-                                          self.Reason.NOT_SUPPORTED,
-                                          not_deps=frozenset([dep]))
-        else:
-            # Incompatible operation, supported algorithm
-            yield self.make_test_case(alg, category, self.Reason.INVALID)
-
-    def one_key_test_cases(
-            self,
-            alg: crypto_knowledge.Algorithm,
-            category: crypto_knowledge.AlgorithmCategory,
-    ) -> Iterator[test_case.TestCase]:
-        """Generate failure test cases for one-key operations with the specified algorithm."""
-        for kt in self.key_types:
-            key_is_compatible = kt.can_do(alg)
-            if key_is_compatible and alg.can_do(category):
-                # Compatible key and operation, unsupported algorithm
-                for dep in psa_information.automatic_dependencies(alg.base_expression):
-                    yield self.make_test_case(alg, category,
-                                              self.Reason.NOT_SUPPORTED,
-                                              kt=kt, not_deps=frozenset([dep]))
-                # Public key for a private-key operation
-                if category.is_asymmetric() and kt.is_public():
-                    yield self.make_test_case(alg, category,
-                                              self.Reason.PUBLIC,
-                                              kt=kt)
-            elif key_is_compatible:
-                # Compatible key, incompatible operation, supported algorithm
-                yield self.make_test_case(alg, category,
-                                          self.Reason.INVALID,
-                                          kt=kt)
-            elif alg.can_do(category):
-                # Incompatible key, compatible operation, supported algorithm
-                yield self.make_test_case(alg, category,
-                                          self.Reason.INCOMPATIBLE,
-                                          kt=kt)
-            else:
-                # Incompatible key and operation. Don't test cases where
-                # multiple things are wrong, to keep the number of test
-                # cases reasonable.
-                pass
-
-    def test_cases_for_algorithm(
-            self,
-            alg: crypto_knowledge.Algorithm,
-    ) -> Iterator[test_case.TestCase]:
-        """Generate operation failure test cases for the specified algorithm."""
-        for category in crypto_knowledge.AlgorithmCategory:
-            if category == crypto_knowledge.AlgorithmCategory.PAKE:
-                # PAKE operations are not implemented yet
-                pass
-            elif category.requires_key():
-                yield from self.one_key_test_cases(alg, category)
-            else:
-                yield from self.no_key_test_cases(alg, category)
-
-    def all_test_cases(self) -> Iterator[test_case.TestCase]:
-        """Generate all test cases for operations that must fail."""
-        algorithms = sorted(self.constructors.algorithms)
-        for expr in self.constructors.generate_expressions(algorithms):
-            alg = crypto_knowledge.Algorithm(expr)
-            yield from self.test_cases_for_algorithm(alg)
-
-
-class StorageKey(psa_storage.Key):
-    """Representation of a key for storage format testing."""
-
-    IMPLICIT_USAGE_FLAGS = {
-        'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
-        'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
-    } #type: Dict[str, str]
-    """Mapping of usage flags to the flags that they imply."""
-
-    def __init__(
-            self,
-            usage: Iterable[str],
-            without_implicit_usage: Optional[bool] = False,
-            **kwargs
-    ) -> None:
-        """Prepare to generate a key.
-
-        * `usage`                 : The usage flags used for the key.
-        * `without_implicit_usage`: if true, do not add the implied usage flags.
-        """
-        usage_flags = set(usage)
-        if not without_implicit_usage:
-            for flag in sorted(usage_flags):
-                if flag in self.IMPLICIT_USAGE_FLAGS:
-                    usage_flags.add(self.IMPLICIT_USAGE_FLAGS[flag])
-        if usage_flags:
-            usage_expression = ' | '.join(sorted(usage_flags))
-        else:
-            usage_expression = '0'
-        super().__init__(usage=usage_expression, **kwargs)
-
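A minimal, self-contained sketch of the implicit-usage expansion performed above, assuming the same two-entry mapping (illustrative only, not the class itself):

IMPLICIT_USAGE_FLAGS = {
    'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
    'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE',
}

def expand_usage(usage):
    # Add the flags implied by each requested flag, then build the
    # C expression stored with the key ('0' when no flag is set).
    flags = set(usage)
    for flag in sorted(flags):
        if flag in IMPLICIT_USAGE_FLAGS:
            flags.add(IMPLICIT_USAGE_FLAGS[flag])
    return ' | '.join(sorted(flags)) if flags else '0'

# expand_usage(['PSA_KEY_USAGE_SIGN_HASH']) ==
#     'PSA_KEY_USAGE_SIGN_HASH | PSA_KEY_USAGE_SIGN_MESSAGE'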
-class StorageTestData(StorageKey):
-    """Representation of test case data for storage format testing."""
-
-    def __init__(
-            self,
-            description: str,
-            expected_usage: Optional[List[str]] = None,
-            **kwargs
-    ) -> None:
-        """Prepare to generate test data
-
-        * `description`   : used for the test case names
-        * `expected_usage`: the usage flags generated as the expected usage flags
-                            in the test cases. Can differ from the usage flags
-                            stored in the keys because of the usage flags extension.
-        """
-        super().__init__(**kwargs)
-        self.description = description #type: str
-        if expected_usage is None:
-            self.expected_usage = self.usage #type: psa_storage.Expr
-        elif expected_usage:
-            self.expected_usage = psa_storage.Expr(' | '.join(expected_usage))
-        else:
-            self.expected_usage = psa_storage.Expr(0)
-
-class StorageFormat:
-    """Storage format stability test cases."""
-
-    def __init__(self, info: psa_information.Information, version: int, forward: bool) -> None:
-        """Prepare to generate test cases for storage format stability.
-
-        * `info`: information about the API. See the `Information` class.
-        * `version`: the storage format version to generate test cases for.
-        * `forward`: if true, generate forward compatibility test cases which
-          save a key and check that its representation is as intended. Otherwise
-          generate backward compatibility test cases which inject a key
-          representation and check that it can be read and used.
-        """
-        self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
-        self.version = version #type: int
-        self.forward = forward #type: bool
-
-    RSA_OAEP_RE = re.compile(r'PSA_ALG_RSA_OAEP\((.*)\)\Z')
-    BRAINPOOL_RE = re.compile(r'PSA_KEY_TYPE_\w+\(PSA_ECC_FAMILY_BRAINPOOL_\w+\)\Z')
-    @classmethod
-    def exercise_key_with_algorithm(
-            cls,
-            key_type: psa_storage.Expr, bits: int,
-            alg: psa_storage.Expr
-    ) -> bool:
-        """Whether to exercise the given key with the given algorithm.
-
-        Normally only the type and algorithm matter for compatibility, and
-        this is handled in crypto_knowledge.KeyType.can_do(). This function
-        exists to detect exceptional cases. Exceptional cases detected here
-        are not tested in OpFail and should therefore have manually written
-        test cases.
-        """
-        # Some test keys have the RAW_DATA type and attributes that don't
-        # necessarily make sense. We do this to validate numerical
-        # encodings of the attributes.
-        # Raw data keys have no useful exercise anyway so there is no
-        # loss of test coverage.
-        if key_type.string == 'PSA_KEY_TYPE_RAW_DATA':
-            return False
-        # OAEP requires room for two hashes plus wrapping
-        m = cls.RSA_OAEP_RE.match(alg.string)
-        if m:
-            hash_alg = m.group(1)
-            hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg)
-            key_length = (bits + 7) // 8
-            # Leave enough room for at least one byte of plaintext
-            return key_length > 2 * hash_length + 2
-        # There's nothing wrong with ECC keys on Brainpool curves,
-        # but operations with them are very slow. So we only exercise them
-        # with a single algorithm, not with all possible hashes. We do
-        # exercise other curves with all algorithms so test coverage is
-        # perfectly adequate like this.
-        m = cls.BRAINPOOL_RE.match(key_type.string)
-        if m and alg.string != 'PSA_ALG_ECDSA_ANY':
-            return False
-        return True
-
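A worked instance of the OAEP room check above, assuming a 1024-bit RSA key and PSA_ALG_RSA_OAEP(PSA_ALG_SHA_256), whose hash output is 32 bytes:

bits = 1024
hash_length = 32                    # SHA-256 output size in bytes
key_length = (bits + 7) // 8        # 128 bytes
# Two hashes plus two bytes of OAEP overhead must fit, leaving at least
# one byte of plaintext: 128 > 2 * 32 + 2, so this key is exercised.
assert key_length > 2 * hash_length + 2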
-    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
-        """Construct a storage format test case for the given key.
-
-        If ``forward`` is true, generate a forward compatibility test case:
-        create a key and validate that it has the expected representation.
-        Otherwise generate a backward compatibility test case: inject the
-        key representation into storage and validate that it can be read
-        correctly.
-        """
-        verb = 'save' if self.forward else 'read'
-        tc = test_case.TestCase()
-        tc.set_description(verb + ' ' + key.description)
-        dependencies = psa_information.automatic_dependencies(
-            key.lifetime.string, key.type.string,
-            key.alg.string, key.alg2.string,
-        )
-        dependencies = psa_information.finish_family_dependencies(dependencies, key.bits)
-        dependencies += psa_information.generate_deps_from_description(key.description)
-        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
-        tc.set_dependencies(dependencies)
-        tc.set_function('key_storage_' + verb)
-        if self.forward:
-            extra_arguments = []
-        else:
-            flags = []
-            if self.exercise_key_with_algorithm(key.type, key.bits, key.alg):
-                flags.append('TEST_FLAG_EXERCISE')
-            if 'READ_ONLY' in key.lifetime.string:
-                flags.append('TEST_FLAG_READ_ONLY')
-            extra_arguments = [' | '.join(flags) if flags else '0']
-        tc.set_arguments([key.lifetime.string,
-                          key.type.string, str(key.bits),
-                          key.expected_usage.string,
-                          key.alg.string, key.alg2.string,
-                          '"' + key.material.hex() + '"',
-                          '"' + key.hex() + '"',
-                          *extra_arguments])
-        return tc
-
-    def key_for_lifetime(
-            self,
-            lifetime: str,
-    ) -> StorageTestData:
-        """Construct a test key for the given lifetime."""
-        short = lifetime
-        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
-                       r'', short)
-        short = crypto_knowledge.short_expression(short)
-        description = 'lifetime: ' + short
-        key = StorageTestData(version=self.version,
-                              id=1, lifetime=lifetime,
-                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
-                              usage=['PSA_KEY_USAGE_EXPORT'], alg=0, alg2=0,
-                              material=b'L',
-                              description=description)
-        return key
-
-    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
-        """Generate test keys covering lifetimes."""
-        lifetimes = sorted(self.constructors.lifetimes)
-        expressions = self.constructors.generate_expressions(lifetimes)
-        for lifetime in expressions:
-            # Don't attempt to create or load a volatile key in storage
-            if 'VOLATILE' in lifetime:
-                continue
-            # Don't attempt to create a read-only key in storage,
-            # but do attempt to load one.
-            if 'READ_ONLY' in lifetime and self.forward:
-                continue
-            yield self.key_for_lifetime(lifetime)
-
-    def key_for_usage_flags(
-            self,
-            usage_flags: List[str],
-            short: Optional[str] = None,
-            test_implicit_usage: Optional[bool] = True
-    ) -> StorageTestData:
-        """Construct a test key for the given key usage."""
-        extra_desc = ' without implication' if test_implicit_usage else ''
-        description = 'usage' + extra_desc + ': '
-        key1 = StorageTestData(version=self.version,
-                               id=1, lifetime=0x00000001,
-                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
-                               expected_usage=usage_flags,
-                               without_implicit_usage=not test_implicit_usage,
-                               usage=usage_flags, alg=0, alg2=0,
-                               material=b'K',
-                               description=description)
-        if short is None:
-            usage_expr = key1.expected_usage.string
-            key1.description += crypto_knowledge.short_expression(usage_expr)
-        else:
-            key1.description += short
-        return key1
-
-    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
-        """Generate test keys covering usage flags."""
-        known_flags = sorted(self.constructors.key_usage_flags)
-        yield self.key_for_usage_flags(['0'], **kwargs)
-        for usage_flag in known_flags:
-            yield self.key_for_usage_flags([usage_flag], **kwargs)
-        for flag1, flag2 in zip(known_flags,
-                                known_flags[1:] + [known_flags[0]]):
-            yield self.key_for_usage_flags([flag1, flag2], **kwargs)
-
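The pairing scheme above, shown on a short placeholder list: each flag is tested on its own and together with its successor, wrapping around to the first flag at the end:

known_flags = ['EXPORT', 'SIGN_HASH', 'VERIFY_HASH']   # placeholder names
pairs = list(zip(known_flags, known_flags[1:] + [known_flags[0]]))
# pairs == [('EXPORT', 'SIGN_HASH'), ('SIGN_HASH', 'VERIFY_HASH'),
#           ('VERIFY_HASH', 'EXPORT')]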
-    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
-        known_flags = sorted(self.constructors.key_usage_flags)
-        yield self.key_for_usage_flags(known_flags, short='all known')
-
-    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
-        yield from self.generate_keys_for_usage_flags()
-        yield from self.generate_key_for_all_usage_flags()
-
-    def key_for_type_and_alg(
-            self,
-            kt: crypto_knowledge.KeyType,
-            bits: int,
-            alg: Optional[crypto_knowledge.Algorithm] = None,
-    ) -> StorageTestData:
-        """Construct a test key of the given type.
-
-        If alg is not None, this key allows it.
-        """
-        usage_flags = ['PSA_KEY_USAGE_EXPORT']
-        alg1 = 0 #type: psa_storage.Exprable
-        alg2 = 0
-        if alg is not None:
-            alg1 = alg.expression
-            usage_flags += alg.usage_flags(public=kt.is_public())
-        key_material = kt.key_material(bits)
-        description = 'type: {} {}-bit'.format(kt.short_expression(1), bits)
-        if alg is not None:
-            description += ', ' + alg.short_expression(1)
-        key = StorageTestData(version=self.version,
-                              id=1, lifetime=0x00000001,
-                              type=kt.expression, bits=bits,
-                              usage=usage_flags, alg=alg1, alg2=alg2,
-                              material=key_material,
-                              description=description)
-        return key
-
-    def keys_for_type(
-            self,
-            key_type: str,
-            all_algorithms: List[crypto_knowledge.Algorithm],
-    ) -> Iterator[StorageTestData]:
-        """Generate test keys for the given key type."""
-        kt = crypto_knowledge.KeyType(key_type)
-        for bits in kt.sizes_to_test():
-            # Test a non-exercisable key, as well as exercisable keys for
-            # each compatible algorithm.
-            # To do: test reading a key from storage with an incompatible
-            # or unsupported algorithm.
-            yield self.key_for_type_and_alg(kt, bits)
-            compatible_algorithms = [alg for alg in all_algorithms
-                                     if kt.can_do(alg)]
-            for alg in compatible_algorithms:
-                yield self.key_for_type_and_alg(kt, bits, alg)
-
-    def all_keys_for_types(self) -> Iterator[StorageTestData]:
-        """Generate test keys covering key types and their representations."""
-        key_types = sorted(self.constructors.key_types)
-        all_algorithms = [crypto_knowledge.Algorithm(alg)
-                          for alg in self.constructors.generate_expressions(
-                              sorted(self.constructors.algorithms)
-                          )]
-        for key_type in self.constructors.generate_expressions(key_types):
-            yield from self.keys_for_type(key_type, all_algorithms)
-
-    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
-        """Generate test keys for the encoding of the specified algorithm."""
-        # These test cases only validate the encoding of algorithms, not
-        # whether the key read from storage is suitable for an operation.
-        # `keys_for_type` generates read tests with an algorithm and a
-        # compatible key.
-        descr = crypto_knowledge.short_expression(alg, 1)
-        usage = ['PSA_KEY_USAGE_EXPORT']
-        key1 = StorageTestData(version=self.version,
-                               id=1, lifetime=0x00000001,
-                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
-                               usage=usage, alg=alg, alg2=0,
-                               material=b'K',
-                               description='alg: ' + descr)
-        yield key1
-        key2 = StorageTestData(version=self.version,
-                               id=1, lifetime=0x00000001,
-                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
-                               usage=usage, alg=0, alg2=alg,
-                               material=b'L',
-                               description='alg2: ' + descr)
-        yield key2
-
-    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
-        """Generate test keys covering algorithm encodings."""
-        algorithms = sorted(self.constructors.algorithms)
-        for alg in self.constructors.generate_expressions(algorithms):
-            yield from self.keys_for_algorithm(alg)
-
-    def generate_all_keys(self) -> Iterator[StorageTestData]:
-        """Generate all keys for the test cases."""
-        yield from self.all_keys_for_lifetimes()
-        yield from self.all_keys_for_usage_flags()
-        yield from self.all_keys_for_types()
-        yield from self.all_keys_for_algorithms()
-
-    def all_test_cases(self) -> Iterator[test_case.TestCase]:
-        """Generate all storage format test cases."""
-        # First build a list of all keys, then construct all the corresponding
-        # test cases. This allows all required information to be obtained in
-        # one go, which is a significant performance gain as the information
-        # includes numerical values obtained by compiling a C program.
-        all_keys = list(self.generate_all_keys())
-        for key in all_keys:
-            if key.location_value() != 0:
-                # Skip keys with a non-default location, because they
-                # require a driver and we currently have no mechanism to
-                # determine whether a driver is available.
-                continue
-            yield self.make_test_case(key)
-
-class StorageFormatForward(StorageFormat):
-    """Storage format stability test cases for forward compatibility."""
-
-    def __init__(self, info: psa_information.Information, version: int) -> None:
-        super().__init__(info, version, True)
-
-class StorageFormatV0(StorageFormat):
-    """Storage format stability test cases for version 0 compatibility."""
-
-    def __init__(self, info: psa_information.Information) -> None:
-        super().__init__(info, 0, False)
-
-    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
-        """Generate test keys covering usage flags."""
-        yield from super().all_keys_for_usage_flags()
-        yield from self.generate_keys_for_usage_flags(test_implicit_usage=False)
-
-    def keys_for_implicit_usage(
-            self,
-            implyer_usage: str,
-            alg: str,
-            key_type: crypto_knowledge.KeyType
-    ) -> StorageTestData:
-        # pylint: disable=too-many-locals
-        """Generate test keys for the specified implicit usage flag,
-           algorithm and key type combination.
-        """
-        bits = key_type.sizes_to_test()[0]
-        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
-        usage_flags = ['PSA_KEY_USAGE_EXPORT']
-        material_usage_flags = usage_flags + [implyer_usage]
-        expected_usage_flags = material_usage_flags + [implicit_usage]
-        alg2 = 0
-        key_material = key_type.key_material(bits)
-        usage_expression = crypto_knowledge.short_expression(implyer_usage, 1)
-        alg_expression = crypto_knowledge.short_expression(alg, 1)
-        key_type_expression = key_type.short_expression(1)
-        description = 'implied by {}: {} {} {}-bit'.format(
-            usage_expression, alg_expression, key_type_expression, bits)
-        key = StorageTestData(version=self.version,
-                              id=1, lifetime=0x00000001,
-                              type=key_type.expression, bits=bits,
-                              usage=material_usage_flags,
-                              expected_usage=expected_usage_flags,
-                              without_implicit_usage=True,
-                              alg=alg, alg2=alg2,
-                              material=key_material,
-                              description=description)
-        return key
-
-    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
-        # pylint: disable=too-many-locals
-        """Match possible key types for sign algorithms."""
-        # To create a valid combination, both the algorithms and key types
-        # must be filtered. Pair them with keywords created from their names.
-        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
-        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
-        keyword_translation = {
-            'ECDSA': 'ECC',
-            'ED[0-9]*.*' : 'EDWARDS'
-        }
-        exclusive_keywords = {
-            'EDWARDS': 'ECC'
-        }
-        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
-        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
-        alg_with_keys = {} #type: Dict[str, List[str]]
-        translation_table = str.maketrans('(', '_', ')')
-        for alg in algorithms:
-            # Generate keywords from the name of the algorithm
-            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
-            # Translate keywords for better matching with the key types
-            for keyword in alg_keywords.copy():
-                for pattern, replace in keyword_translation.items():
-                    if re.match(pattern, keyword):
-                        alg_keywords.remove(keyword)
-                        alg_keywords.add(replace)
-            # Filter out incompatible algorithms
-            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
-                continue
-
-            for key_type in key_types:
-                # Generate keywords from the name of the key type
-                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])
-
-                # Remove ambiguous keywords
-                for keyword1, keyword2 in exclusive_keywords.items():
-                    if keyword1 in key_type_keywords:
-                        key_type_keywords.remove(keyword2)
-
-                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
-                   not key_type_keywords.isdisjoint(alg_keywords):
-                    if alg in alg_with_keys:
-                        alg_with_keys[alg].append(key_type)
-                    else:
-                        alg_with_keys[alg] = [key_type]
-        return alg_with_keys
-
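A self-contained sketch of the keyword matching above, run on one hypothetical algorithm and key type expression:

import re

keyword_translation = {'ECDSA': 'ECC', 'ED[0-9]*.*': 'EDWARDS'}
alg = 'PSA_ALG_DETERMINISTIC_ECDSA(PSA_ALG_SHA_256)'
key_type = 'PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_R1)'

# Keywords from the algorithm name, with ECDSA translated to ECC.
alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
for keyword in alg_keywords.copy():
    for pattern, replace in keyword_translation.items():
        if re.match(pattern, keyword):
            alg_keywords.remove(keyword)
            alg_keywords.add(replace)
# alg_keywords == {'DETERMINISTIC', 'ECC'}

# Keywords from the key type name, after flattening the parentheses.
translation_table = str.maketrans('(', '_', ')')
key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])

# 'ECC' appears in both sets, so this algorithm/key type pair is kept.
print(not key_type_keywords.isdisjoint(alg_keywords))   # True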
-    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
-        """Generate test keys for usage flag extensions."""
-        # Generate a key type and algorithm pair for each extendable usage
-        # flag to generate a valid key for exercising. The key is generated
-        # without usage extension to check the extension compatibility.
-        alg_with_keys = self.gather_key_types_for_sign_alg()
-
-        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
-            for alg in sorted(alg_with_keys):
-                for key_type in sorted(alg_with_keys[alg]):
-                    # The key types must be filtered to fit the specific usage flag.
-                    kt = crypto_knowledge.KeyType(key_type)
-                    if kt.is_public() and '_SIGN_' in usage:
-                        # Can't sign with a public key
-                        continue
-                    yield self.keys_for_implicit_usage(usage, alg, kt)
-
-    def generate_all_keys(self) -> Iterator[StorageTestData]:
-        yield from super().generate_all_keys()
-        yield from self.all_keys_for_implicit_usage()
-
-
-class PSATestGenerator(test_data_generation.TestGenerator):
-    """Test generator subclass including PSA targets and info."""
-    # Note that targets whose names contain 'test_format' have their content
-    # validated by `abi_check.py`.
-    targets = {
-        'test_suite_psa_crypto_generate_key.generated':
-        lambda info: KeyGenerate(info).test_cases_for_key_generation(),
-        'test_suite_psa_crypto_not_supported.generated':
-        lambda info: KeyTypeNotSupported(info).test_cases_for_not_supported(),
-        'test_suite_psa_crypto_low_hash.generated':
-        lambda info: crypto_data_tests.HashPSALowLevel(info).all_test_cases(),
-        'test_suite_psa_crypto_op_fail.generated':
-        lambda info: OpFail(info).all_test_cases(),
-        'test_suite_psa_crypto_storage_format.current':
-        lambda info: StorageFormatForward(info, 0).all_test_cases(),
-        'test_suite_psa_crypto_storage_format.v0':
-        lambda info: StorageFormatV0(info).all_test_cases(),
-    } #type: Dict[str, Callable[[psa_information.Information], Iterable[test_case.TestCase]]]
-
-    def __init__(self, options):
-        super().__init__(options)
-        self.info = psa_information.Information()
-
-    def generate_target(self, name: str, *target_args) -> None:
-        super().generate_target(name, self.info)
-
-
-if __name__ == '__main__':
-    test_data_generation.main(sys.argv[1:], __doc__, PSATestGenerator)
diff --git a/tests/scripts/generate_psa_wrappers.py b/tests/scripts/generate_psa_wrappers.py
deleted file mode 100755
index 500693b..0000000
--- a/tests/scripts/generate_psa_wrappers.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python3
-"""Generate wrapper functions for PSA function calls.
-"""
-
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-### WARNING: the code in this file has not been extensively reviewed yet.
-### We do not think it is harmful, but it may be below our normal standards
-### for robustness and maintainability.
-
-import argparse
-import itertools
-import os
-from typing import Iterator, List, Optional, Tuple
-
-import scripts_path #pylint: disable=unused-import
-from mbedtls_framework import build_tree
-from mbedtls_framework import c_parsing_helper
-from mbedtls_framework import c_wrapper_generator
-from mbedtls_framework import typing_util
-
-
-class BufferParameter:
-    """Description of an input or output buffer parameter sequence to a PSA function."""
-    #pylint: disable=too-few-public-methods
-
-    def __init__(self, i: int, is_output: bool,
-                 buffer_name: str, size_name: str) -> None:
-        """Initialize the parameter information.
-
-        i is the index of the function argument that is the pointer to the buffer.
-        The size is argument i+1. For a variable-size output, the actual length
-        goes in argument i+2.
-
-        buffer_name and size_name are the names of arguments i and i+1.
-        This class does not yet help with the output length.
-        """
-        self.index = i
-        self.buffer_name = buffer_name
-        self.size_name = size_name
-        self.is_output = is_output
-
-
-class PSAWrapperGenerator(c_wrapper_generator.Base):
-    """Generate a C source file containing wrapper functions for PSA Crypto API calls."""
-
-    _CPP_GUARDS = ('defined(MBEDTLS_PSA_CRYPTO_C) && ' +
-                   'defined(MBEDTLS_TEST_HOOKS) && \\\n    ' +
-                   '!defined(RECORD_PSA_STATUS_COVERAGE_LOG)')
-    _WRAPPER_NAME_PREFIX = 'mbedtls_test_wrap_'
-    _WRAPPER_NAME_SUFFIX = ''
-
-    def gather_data(self) -> None:
-        root_dir = build_tree.guess_mbedtls_root()
-        for header_name in ['crypto.h', 'crypto_extra.h']:
-            header_path = os.path.join(root_dir, 'include', 'psa', header_name)
-            c_parsing_helper.read_function_declarations(self.functions, header_path)
-
-    _SKIP_FUNCTIONS = frozenset([
-        'mbedtls_psa_external_get_random', # not a library function
-        'psa_get_key_domain_parameters', # client-side function
-        'psa_get_key_slot_number', # client-side function
-        'psa_key_derivation_verify_bytes', # not implemented yet
-        'psa_key_derivation_verify_key', # not implemented yet
-        'psa_set_key_domain_parameters', # client-side function
-    ])
-
-    def _skip_function(self, function: c_wrapper_generator.FunctionInfo) -> bool:
-        if function.return_type != 'psa_status_t':
-            return True
-        if function.name in self._SKIP_FUNCTIONS:
-            return True
-        return False
-
-    # PAKE stuff: not implemented yet
-    _PAKE_STUFF = frozenset([
-        'psa_crypto_driver_pake_inputs_t *',
-        'psa_pake_cipher_suite_t *',
-    ])
-
-    def _return_variable_name(self,
-                              function: c_wrapper_generator.FunctionInfo) -> str:
-        """The name of the variable that will contain the return value."""
-        if function.return_type == 'psa_status_t':
-            return 'status'
-        return super()._return_variable_name(function)
-
-    _FUNCTION_GUARDS = c_wrapper_generator.Base._FUNCTION_GUARDS.copy() \
-        #pylint: disable=protected-access
-    _FUNCTION_GUARDS.update({
-        'mbedtls_psa_register_se_key': 'defined(MBEDTLS_PSA_CRYPTO_SE_C)',
-        'mbedtls_psa_inject_entropy': 'defined(MBEDTLS_PSA_INJECT_ENTROPY)',
-        'mbedtls_psa_external_get_random': 'defined(MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG)',
-        'mbedtls_psa_platform_get_builtin_key': 'defined(MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS)',
-    })
-
-    @staticmethod
-    def _detect_buffer_parameters(arguments: List[c_parsing_helper.ArgumentInfo],
-                                  argument_names: List[str]) -> Iterator[BufferParameter]:
-        """Detect function arguments that are buffers (pointer, size [,length])."""
-        types = ['' if arg.suffix else arg.type for arg in arguments]
-        # pairs = list of (type_of_arg_N, type_of_arg_N+1)
-        # where each type_of_arg_X is the empty string if the type is an array
-        # or there is no argument X.
-        pairs = enumerate(itertools.zip_longest(types, types[1:], fillvalue=''))
-        for i, t01 in pairs:
-            if (t01[0] == 'const uint8_t *' or t01[0] == 'uint8_t *') and \
-               t01[1] == 'size_t':
-                yield BufferParameter(i, not t01[0].startswith('const '),
-                                      argument_names[i], argument_names[i+1])
-
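A minimal sketch of the pointer/size pairing above, over a hypothetical argument list shaped like psa_hash_compute():

import itertools

arguments = [                      # (type, name) pairs, illustrative only
    ('psa_algorithm_t', 'alg'),
    ('const uint8_t *', 'input'), ('size_t', 'input_length'),
    ('uint8_t *', 'hash'), ('size_t', 'hash_size'),
    ('size_t *', 'hash_length'),
]
types = [t for t, _ in arguments]
names = [n for _, n in arguments]
for i, (t0, t1) in enumerate(itertools.zip_longest(types, types[1:], fillvalue='')):
    if t0 in ('const uint8_t *', 'uint8_t *') and t1 == 'size_t':
        is_output = not t0.startswith('const ')
        print(i, names[i], names[i + 1], 'output' if is_output else 'input')
# 1 input input_length input
# 3 hash hash_size output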
-    @staticmethod
-    def _write_poison_buffer_parameter(out: typing_util.Writable,
-                                       param: BufferParameter,
-                                       poison: bool) -> None:
-        """Write poisoning or unpoisoning code for a buffer parameter.
-
-        Write poisoning code if poison is true, unpoisoning code otherwise.
-        """
-        out.write('    MBEDTLS_TEST_MEMORY_{}({}, {});\n'.format(
-            'POISON' if poison else 'UNPOISON',
-            param.buffer_name, param.size_name
-        ))
-
-    def _write_poison_buffer_parameters(self, out: typing_util.Writable,
-                                        buffer_parameters: List[BufferParameter],
-                                        poison: bool) -> None:
-        """Write poisoning or unpoisoning code for the buffer parameters.
-
-        Write poisoning code if poison is true, unpoisoning code otherwise.
-        """
-        if not buffer_parameters:
-            return
-        out.write('#if !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS)\n')
-        for param in buffer_parameters:
-            self._write_poison_buffer_parameter(out, param, poison)
-        out.write('#endif /* !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS) */\n')
-
-    @staticmethod
-    def _parameter_should_be_copied(function_name: str,
-                                    _buffer_name: Optional[str]) -> bool:
-        """Whether the specified buffer argument to a PSA function should be copied.
-        """
-        # False-positives that do not need buffer copying
-        if function_name in ('mbedtls_psa_inject_entropy',
-                             'psa_crypto_driver_pake_get_password',
-                             'psa_crypto_driver_pake_get_user',
-                             'psa_crypto_driver_pake_get_peer'):
-            return False
-
-        return True
-
-    def _write_function_call(self, out: typing_util.Writable,
-                             function: c_wrapper_generator.FunctionInfo,
-                             argument_names: List[str]) -> None:
-        buffer_parameters = list(
-            param
-            for param in self._detect_buffer_parameters(function.arguments,
-                                                        argument_names)
-            if self._parameter_should_be_copied(function.name,
-                                                function.arguments[param.index].name))
-        self._write_poison_buffer_parameters(out, buffer_parameters, True)
-        super()._write_function_call(out, function, argument_names)
-        self._write_poison_buffer_parameters(out, buffer_parameters, False)
-
-    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
-        super()._write_prologue(out, header)
-        out.write("""
-#if {}
-
-#include <psa/crypto.h>
-
-#include <test/memory.h>
-#include <test/psa_crypto_helpers.h>
-#include <test/psa_test_wrappers.h>
-"""
-                  .format(self._CPP_GUARDS))
-
-    def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None:
-        out.write("""
-#endif /* {} */
-"""
-                  .format(self._CPP_GUARDS))
-        super()._write_epilogue(out, header)
-
-
-class PSALoggingWrapperGenerator(PSAWrapperGenerator, c_wrapper_generator.Logging):
-    """Generate a C source file containing wrapper functions that log PSA Crypto API calls."""
-
-    def __init__(self, stream: str) -> None:
-        super().__init__()
-        self.set_stream(stream)
-
-    _PRINTF_TYPE_CAST = c_wrapper_generator.Logging._PRINTF_TYPE_CAST.copy()
-    _PRINTF_TYPE_CAST.update({
-        'mbedtls_svc_key_id_t': 'unsigned',
-        'psa_algorithm_t': 'unsigned',
-        'psa_drv_slot_number_t': 'unsigned long long',
-        'psa_key_derivation_step_t': 'int',
-        'psa_key_id_t': 'unsigned',
-        'psa_key_slot_number_t': 'unsigned long long',
-        'psa_key_lifetime_t': 'unsigned',
-        'psa_key_type_t': 'unsigned',
-        'psa_key_usage_flags_t': 'unsigned',
-        'psa_pake_role_t': 'int',
-        'psa_pake_step_t': 'int',
-        'psa_status_t': 'int',
-    })
-
-    def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]:
-        if typ.startswith('const '):
-            typ = typ[6:]
-        if typ == 'uint8_t *':
-            # Skip buffers
-            return '', []
-        if typ.endswith('operation_t *'):
-            return '', []
-        if typ in self._PAKE_STUFF:
-            return '', []
-        if typ == 'psa_key_attributes_t *':
-            return (var + '={id=%u, lifetime=0x%08x, type=0x%08x, bits=%u, alg=%08x, usage=%08x}',
-                    ['(unsigned) psa_get_key_{}({})'.format(field, var)
-                     for field in ['id', 'lifetime', 'type', 'bits', 'algorithm', 'usage_flags']])
-        return super()._printf_parameters(typ, var)
-
-
-DEFAULT_C_OUTPUT_FILE_NAME = 'tests/src/psa_test_wrappers.c'
-DEFAULT_H_OUTPUT_FILE_NAME = 'tests/include/test/psa_test_wrappers.h'
-
-def main() -> None:
-    parser = argparse.ArgumentParser(description=globals()['__doc__'])
-    parser.add_argument('--log',
-                        help='Stream to log to (default: no logging code)')
-    parser.add_argument('--output-c',
-                        metavar='FILENAME',
-                        default=DEFAULT_C_OUTPUT_FILE_NAME,
-                        help=('Output .c file path (default: {}; skip .c output if empty)'
-                              .format(DEFAULT_C_OUTPUT_FILE_NAME)))
-    parser.add_argument('--output-h',
-                        metavar='FILENAME',
-                        default=DEFAULT_H_OUTPUT_FILE_NAME,
-                        help=('Output .h file path (default: {}; skip .h output if empty)'
-                              .format(DEFAULT_H_OUTPUT_FILE_NAME)))
-    options = parser.parse_args()
-    if options.log:
-        generator = PSALoggingWrapperGenerator(options.log) #type: PSAWrapperGenerator
-    else:
-        generator = PSAWrapperGenerator()
-    generator.gather_data()
-    if options.output_h:
-        generator.write_h_file(options.output_h)
-    if options.output_c:
-        generator.write_c_file(options.output_c)
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/scripts/generate_test_cert_macros.py b/tests/scripts/generate_test_cert_macros.py
deleted file mode 100755
index 14270e0..0000000
--- a/tests/scripts/generate_test_cert_macros.py
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Generate `tests/src/test_certs.h`, which includes certificates, keys and certificate lists for testing.
-"""
-
-#
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-
-import os
-import sys
-import argparse
-import jinja2
-import scripts_path # pylint: disable=unused-import
-from mbedtls_framework.build_tree import guess_project_root
-
-TEST_DIR = os.path.join(guess_project_root(), 'tests')
-DATA_FILES_PATH = os.path.join(TEST_DIR, 'data_files')
-
-INPUT_ARGS = [
-    ("string", "TEST_CA_CRT_EC_PEM", DATA_FILES_PATH + "/test-ca2.crt"),
-    ("binary", "TEST_CA_CRT_EC_DER", DATA_FILES_PATH + "/test-ca2.crt.der"),
-    ("string", "TEST_CA_KEY_EC_PEM", DATA_FILES_PATH + "/test-ca2.key.enc"),
-    ("password", "TEST_CA_PWD_EC_PEM", "PolarSSLTest"),
-    ("binary", "TEST_CA_KEY_EC_DER", DATA_FILES_PATH + "/test-ca2.key.der"),
-    ("string", "TEST_CA_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/test-ca-sha256.crt"),
-    ("binary", "TEST_CA_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/test-ca-sha256.crt.der"),
-    ("string", "TEST_CA_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/test-ca-sha1.crt"),
-    ("binary", "TEST_CA_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/test-ca-sha1.crt.der"),
-    ("string", "TEST_CA_KEY_RSA_PEM", DATA_FILES_PATH + "/test-ca.key"),
-    ("password", "TEST_CA_PWD_RSA_PEM", "PolarSSLTest"),
-    ("binary", "TEST_CA_KEY_RSA_DER", DATA_FILES_PATH + "/test-ca.key.der"),
-    ("string", "TEST_SRV_CRT_EC_PEM", DATA_FILES_PATH + "/server5.crt"),
-    ("binary", "TEST_SRV_CRT_EC_DER", DATA_FILES_PATH + "/server5.crt.der"),
-    ("string", "TEST_SRV_KEY_EC_PEM", DATA_FILES_PATH + "/server5.key"),
-    ("binary", "TEST_SRV_KEY_EC_DER", DATA_FILES_PATH + "/server5.key.der"),
-    ("string", "TEST_SRV_CRT_RSA_SHA256_PEM", DATA_FILES_PATH + "/server2-sha256.crt"),
-    ("binary", "TEST_SRV_CRT_RSA_SHA256_DER", DATA_FILES_PATH + "/server2-sha256.crt.der"),
-    ("string", "TEST_SRV_CRT_RSA_SHA1_PEM", DATA_FILES_PATH + "/server2.crt"),
-    ("binary", "TEST_SRV_CRT_RSA_SHA1_DER", DATA_FILES_PATH + "/server2.crt.der"),
-    ("string", "TEST_SRV_KEY_RSA_PEM", DATA_FILES_PATH + "/server2.key"),
-    ("binary", "TEST_SRV_KEY_RSA_DER", DATA_FILES_PATH + "/server2.key.der"),
-    ("string", "TEST_CLI_CRT_EC_PEM", DATA_FILES_PATH + "/cli2.crt"),
-    ("binary", "TEST_CLI_CRT_EC_DER", DATA_FILES_PATH + "/cli2.crt.der"),
-    ("string", "TEST_CLI_KEY_EC_PEM", DATA_FILES_PATH + "/cli2.key"),
-    ("binary", "TEST_CLI_KEY_EC_DER", DATA_FILES_PATH + "/cli2.key.der"),
-    ("string", "TEST_CLI_CRT_RSA_PEM", DATA_FILES_PATH + "/cli-rsa-sha256.crt"),
-    ("binary", "TEST_CLI_CRT_RSA_DER", DATA_FILES_PATH + "/cli-rsa-sha256.crt.der"),
-    ("string", "TEST_CLI_KEY_RSA_PEM", DATA_FILES_PATH + "/cli-rsa.key"),
-    ("binary", "TEST_CLI_KEY_RSA_DER", DATA_FILES_PATH + "/cli-rsa.key.der"),
-]
-
-def main():
-    parser = argparse.ArgumentParser()
-    default_output_path = os.path.join(TEST_DIR, 'src', 'test_certs.h')
-    parser.add_argument('--output', type=str, default=default_output_path)
-    parser.add_argument('--list-dependencies', action='store_true')
-    args = parser.parse_args()
-
-    if args.list_dependencies:
-        files_list = [arg[2] for arg in INPUT_ARGS]
-        print(" ".join(files_list))
-        return
-
-    generate(INPUT_ARGS, output=args.output)
-
-#pylint: disable=dangerous-default-value, unused-argument
-def generate(values=[], output=None):
-    """Generate C header file.
-    """
-    template_loader = jinja2.FileSystemLoader(DATA_FILES_PATH)
-    template_env = jinja2.Environment(
-        loader=template_loader, lstrip_blocks=True, trim_blocks=True,
-        keep_trailing_newline=True)
-
-    def read_as_c_array(filename):
-        with open(filename, 'rb') as f:
-            data = f.read(12)
-            while data:
-                yield ', '.join(['{:#04x}'.format(b) for b in data])
-                data = f.read(12)
-
-    def read_lines(filename):
-        with open(filename) as f:
-            try:
-                for line in f:
-                    yield line.strip()
-            except:
-                print(filename)
-                raise
-
-    def put_to_column(value, position=0):
-        return ' '*position + value
-
-    template_env.filters['read_as_c_array'] = read_as_c_array
-    template_env.filters['read_lines'] = read_lines
-    template_env.filters['put_to_column'] = put_to_column
-
-    template = template_env.get_template('test_certs.h.jinja2')
-
-    with open(output, 'w') as f:
-        f.write(template.render(macros=values))
-
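A hypothetical direct call to generate() above, with a single entry and an illustrative output path (normally main() passes the full INPUT_ARGS list, and --list-dependencies merely prints the input file paths):

generate([("string", "TEST_CA_CRT_EC_PEM", DATA_FILES_PATH + "/test-ca2.crt")],
         output="/tmp/test_certs.h")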
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/tests/scripts/generate_test_code.py b/tests/scripts/generate_test_code.py
deleted file mode 100755
index 5f711bf..0000000
--- a/tests/scripts/generate_test_code.py
+++ /dev/null
@@ -1,1277 +0,0 @@
-#!/usr/bin/env python3
-# Test suites code generator.
-#
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-"""
-This script is a key part of the Mbed TLS test suites framework. To
-understand the script it is important to understand the framework.
-This docstring contains a summary of the framework and explains the
-function of this script.
-
-Mbed TLS test suites:
-=====================
-Scope:
-------
-The test suites focus on unit testing the crypto primitives and also
-include x509 parser tests. Tests can be added to test any Mbed TLS
-module. However, the framework is not capable of testing the SSL
-protocol, since that requires full-stack execution, which is best
-tested as part of the system tests.
-
-Test case definition:
----------------------
-Tests are defined in a test_suite_<module>[.<optional sub module>].data
-file. A test definition contains:
- test name
- optional build macro dependencies
- test function
- test parameters
-
-Test dependencies are build macros that can be specified to indicate
-the build config in which the test is valid. For example, if a test
-depends on a feature that is only enabled by defining a macro, then
-that macro should be specified as a dependency of the test.
-
-The test function is the function that implements the test steps. The
-same function can be specified for different tests that perform the
-same steps with different parameters.
-
-Test parameters are specified in string form separated by ':'.
-Parameters can be of type string, binary data specified as a hex
-string, and integer constants specified as an integer, macro or
-expression. The following is an example test definition:
-
- AES 128 GCM Encrypt and decrypt 8 bytes
- depends_on:MBEDTLS_AES_C:MBEDTLS_GCM_C
- enc_dec_buf:MBEDTLS_CIPHER_AES_128_GCM:"AES-128-GCM":128:8:-1
-
-Test functions:
----------------
-Test functions are coded in C in test_suite_<module>.function files.
-A functions file is not itself compilable and contains special
-format patterns to specify test suite dependencies, the start and end
-of functions, and function dependencies. Check any existing functions
-file for an example.
-
-Execution:
-----------
-Tests are executed in 3 steps:
-- Generating test_suite_<module>[.<optional sub module>].c file
-  for each corresponding .data file.
-- Building each source file into executables.
-- Running each executable and printing report.
-
-Generating the C test source requires more than just the test functions.
-The following extras are required:
-- Process main()
-- Reading .data file and dispatching test cases.
-- Platform specific test case execution
-- Dependency checking
-- Integer expression evaluation
-- Test function dispatch
-
-Build dependencies and integer expressions (in the test parameters)
-are specified as strings in the .data file. Their run time value is
-not known at the generation stage. Hence, they need to be translated
-into run time evaluations. This script generates the run time checks
-for dependencies and integer expressions.
-
-Similarly, function names have to be translated into function calls.
-This script also generates code for function dispatch.
-
-The extra code mentioned here is either generated by this script
-or it comes from the input files: helpers file, platform file and
-the template file.
-
-Helper file:
-------------
-The helpers file contains common helper/utility functions and data.
-
-Platform file:
---------------
-The platform file contains platform-specific setup code and test case
-dispatch code. For example, host_test.function reads the test data
-file from the host's file system and dispatches tests.
-
-Template file:
---------------
-The template file, for example main_test.function, is a template C file
-into which generated code and code from the input files is substituted
-to generate a compilable C file. It also contains skeleton functions for
-dependency checks, expression evaluation and function dispatch. These
-functions are populated with checks and return codes by this script.
-
-The template file contains "replacement" fields that are formatted
-strings processed by the Python string.Template.substitute() method.
-
-This script:
-============
-The core function of this script is to fill the template file with
-code that is generated or read from the helpers and platform files.
-
-This script replaces the following fields in the template and generates
-the test source file:
-
-__MBEDTLS_TEST_TEMPLATE__TEST_COMMON_HELPERS
-            All common code from helpers.function
-            is substituted here.
-__MBEDTLS_TEST_TEMPLATE__FUNCTIONS_CODE
-            Test functions are substituted here
-            from the input test_suite_xyz.function
-            file. C preprocessor checks are generated
-            for the build dependencies specified
-            in the input file. This script also
-            generates wrappers for the test
-            functions with code to expand the
-            string parameters read from the data
-            file.
-__MBEDTLS_TEST_TEMPLATE__EXPRESSION_CODE
-            This script enumerates the
-            expressions in the .data file and
-            generates code to handle enumerated
-            expression Ids and return the values.
-__MBEDTLS_TEST_TEMPLATE__DEP_CHECK_CODE
-            This script enumerates all
-            build dependencies and generate
-            code to handle enumerated build
-            dependency Id and return status: if
-            the dependency is defined or not.
-__MBEDTLS_TEST_TEMPLATE__DISPATCH_CODE
-            This script enumerates the functions
-            specified in the input test data file
-            and generates the initializer for the
-            function table in the template
-            file.
-__MBEDTLS_TEST_TEMPLATE__PLATFORM_CODE
-            Platform specific setup and test
-            dispatch code.
-
-"""
-
-
-import os
-import re
-import sys
-import string
-import argparse
-
-
-# Types recognized as signed integer arguments in test functions.
-SIGNED_INTEGER_TYPES = frozenset([
-    'char',
-    'short',
-    'short int',
-    'int',
-    'int8_t',
-    'int16_t',
-    'int32_t',
-    'int64_t',
-    'intmax_t',
-    'long',
-    'long int',
-    'long long int',
-    'mbedtls_mpi_sint',
-    'psa_status_t',
-])
-# Types recognized as string arguments in test functions.
-STRING_TYPES = frozenset(['char*', 'const char*', 'char const*'])
-# Types recognized as hex data arguments in test functions.
-DATA_TYPES = frozenset(['data_t*', 'const data_t*', 'data_t const*'])
-
-BEGIN_HEADER_REGEX = r'/\*\s*BEGIN_HEADER\s*\*/'
-END_HEADER_REGEX = r'/\*\s*END_HEADER\s*\*/'
-
-BEGIN_SUITE_HELPERS_REGEX = r'/\*\s*BEGIN_SUITE_HELPERS\s*\*/'
-END_SUITE_HELPERS_REGEX = r'/\*\s*END_SUITE_HELPERS\s*\*/'
-
-BEGIN_DEP_REGEX = r'BEGIN_DEPENDENCIES'
-END_DEP_REGEX = r'END_DEPENDENCIES'
-
-BEGIN_CASE_REGEX = r'/\*\s*BEGIN_CASE\s*(?P<depends_on>.*?)\s*\*/'
-END_CASE_REGEX = r'/\*\s*END_CASE\s*\*/'
-
-DEPENDENCY_REGEX = r'depends_on:(?P<dependencies>.*)'
-C_IDENTIFIER_REGEX = r'!?[a-z_][a-z0-9_]*'
-CONDITION_OPERATOR_REGEX = r'[!=]=|[<>]=?'
-# forbid 0ddd which might be accidentally octal or accidentally decimal
-CONDITION_VALUE_REGEX = r'[-+]?(0x[0-9a-f]+|0|[1-9][0-9]*)'
-CONDITION_REGEX = r'({})(?:\s*({})\s*({}))?$'.format(C_IDENTIFIER_REGEX,
-                                                     CONDITION_OPERATOR_REGEX,
-                                                     CONDITION_VALUE_REGEX)
-TEST_FUNCTION_VALIDATION_REGEX = r'\s*void\s+(?P<func_name>\w+)\s*\('
-FUNCTION_ARG_LIST_END_REGEX = r'.*\)'
-EXIT_LABEL_REGEX = r'^exit:'
-
-
-class GeneratorInputError(Exception):
-    """
-    Exception to indicate error in the input files to this script.
-    This includes missing patterns, test function names and other
-    parsing errors.
-    """
-    pass
-
-
-class FileWrapper:
-    """
-    This class extends the file object with an attribute line_no
-    that indicates the line number of the line that was last read.
-    """
-
-    def __init__(self, file_name) -> None:
-        """
-        Instantiate the file object and initialize the line number to 0.
-
-        :param file_name: File path to open.
-        """
-        # private mix-in file object
-        self._f = open(file_name, 'rb')
-        self._line_no = 0
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        """
-        This method makes FileWrapper iterable.
-        It counts the line numbers as each line is read.
-
-        :return: Line read from file.
-        """
-        line = self._f.__next__()
-        self._line_no += 1
-        # Convert the byte array to a string with the correct encoding,
-        # strip any trailing whitespace and restore the newline.
-        return line.decode(sys.getdefaultencoding()).rstrip() + '\n'
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self._f.__exit__(exc_type, exc_val, exc_tb)
-
-    @property
-    def line_no(self):
-        """
-        Property that indicates the line number of the line that was last read.
-        """
-        return self._line_no
-
-    @property
-    def name(self):
-        """
-        Property that indicates the name of the file being read.
-        """
-        return self._f.name
-
-
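A minimal usage sketch of FileWrapper, with a hypothetical functions file name:

with FileWrapper('test_suite_example.function') as funcs_f:
    for line in funcs_f:
        if 'BEGIN_CASE' in line:
            print(funcs_f.line_no, line.strip())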
-def split_dep(dep):
-    """
-    Split the negation character '!' from a dependency. Used by
-    gen_dependencies().
-
-    :param dep: Dependency string
-    :return: string tuple. Ex: ('!', MACRO) for !MACRO and ('', MACRO) for
-             MACRO.
-    """
-    return ('!', dep[1:]) if dep[0] == '!' else ('', dep)
-
-
-def gen_dependencies(dependencies):
-    """
-    Test suite data and functions specify compile-time dependencies.
-    This function generates C preprocessor code from the input
-    dependency list; the caller uses the generated preprocessor code
-    to wrap dependent code.
-    A dependency in the input list can have a leading '!' character
-    to negate a condition. The '!' is separated from the dependency by
-    split_dep() and the appropriate preprocessor check is generated
-    accordingly.
-
-    :param dependencies: List of dependencies.
-    :return: '#if defined'/'#endif' code pair, with macro annotations
-             for readability.
-    """
-    dep_start = ''.join(['#if %sdefined(%s)\n' % (x, y) for x, y in
-                         map(split_dep, dependencies)])
-    dep_end = ''.join(['#endif /* %s */\n' %
-                       x for x in reversed(dependencies)])
-
-    return dep_start, dep_end
-
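-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing the shape of the guards that
-# gen_dependencies() produces for a mixed dependency list.
-def _example_gen_dependencies():
-    start, end = gen_dependencies(['DEP1', '!DEP2'])
-    # start == '#if defined(DEP1)\n#if !defined(DEP2)\n'
-    # end   == '#endif /* !DEP2 */\n#endif /* DEP1 */\n'
-    return start + '/* dependent code */\n' + end
-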
-
-def gen_dependencies_one_line(dependencies):
-    """
-    Similar to gen_dependencies() but generates dependency checks in one line.
-    Useful for generating code with #else block.
-
-    :param dependencies: List of dependencies.
-    :return: Preprocessor check code
-    """
-    defines = '#if ' if dependencies else ''
-    defines += ' && '.join(['%sdefined(%s)' % (x, y) for x, y in map(
-        split_dep, dependencies)])
-    return defines
-
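-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing the single-line guard produced by
-# gen_dependencies_one_line(), suitable for pairing with an #else block.
-def _example_gen_dependencies_one_line():
-    guard = gen_dependencies_one_line(['!DEP1', 'DEP2'])
-    # guard == '#if !defined(DEP1) && defined(DEP2)'
-    return guard
-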
-
-def gen_function_wrapper(name, local_vars, args_dispatch):
-    """
-    Creates test function wrapper code. A wrapper contains the code to
-    unpack parameters from the params[] array.
-
-    :param name: Test function name
-    :param local_vars: Local variables declaration code
-    :param args_dispatch: List of dispatch arguments.
-           Ex: ['(char *) params[0]', '*((int *) params[1])']
-    :return: Test function wrapper.
-    """
-    # Create the wrapper around the test function
-    wrapper = '''
-void {name}_wrapper( void ** params )
-{{
-{unused_params}{locals}
-    {name}( {args} );
-}}
-'''.format(name=name,
-           unused_params='' if args_dispatch else '    (void)params;\n',
-           args=', '.join(args_dispatch),
-           locals=local_vars)
-    return wrapper
-
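-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing the kind of C wrapper gen_function_wrapper()
-# emits for two dispatch expressions.
-def _example_gen_function_wrapper():
-    code = gen_function_wrapper(
-        'test_example', '',
-        ['((mbedtls_test_argument_t *) params[0])->sint',
-         '(char *) params[1]'])
-    # code defines 'void test_example_wrapper( void ** params )' whose body
-    # calls test_example() with the two dispatch expressions above.
-    return code
-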
-
-def gen_dispatch(name, dependencies):
-    """
-    The test suite code template main_test.function defines a C array
-    of test case functions. This function generates an initializer
-    entry for one function in that array. The entry is guarded by a
-    compile-time check of the test function's dependencies: the
-    wrapper is used when the dependencies are met, otherwise NULL is
-    assigned.
-
-    :param name: Test function name
-    :param dependencies: List of dependencies
-    :return: Dispatch code.
-    """
-    if dependencies:
-        preprocessor_check = gen_dependencies_one_line(dependencies)
-        dispatch_code = '''
-{preprocessor_check}
-    {name}_wrapper,
-#else
-    NULL,
-#endif
-'''.format(preprocessor_check=preprocessor_check, name=name)
-    else:
-        dispatch_code = '''
-    {name}_wrapper,
-'''.format(name=name)
-
-    return dispatch_code
-
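-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing the dispatch table entry generated for a
-# test function with one dependency.
-def _example_gen_dispatch():
-    entry = gen_dispatch('test_example', ['MBEDTLS_AES_C'])
-    # entry == '\n#if defined(MBEDTLS_AES_C)\n    test_example_wrapper,\n'
-    #          '#else\n    NULL,\n#endif\n'
-    return entry
-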
-
-def parse_until_pattern(funcs_f, end_regex):
-    """
-    Reads lines from the file object until end_regex matches, and
-    returns the lines read before the match.
-
-    :param funcs_f: file object for .function file
-    :param end_regex: Pattern to stop parsing
-    :return: Lines read before the end pattern
-    """
-    headers = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
-    for line in funcs_f:
-        if re.search(end_regex, line):
-            break
-        headers += line
-    else:
-        raise GeneratorInputError("file: %s - end pattern [%s] not found!" %
-                                  (funcs_f.name, end_regex))
-
-    return headers
-
-
-def validate_dependency(dependency):
-    """
-    Validates a C macro and raises GeneratorInputError on invalid input.
-    :param dependency: Input macro dependency
-    :return: input dependency stripped of leading & trailing white spaces.
-    """
-    dependency = dependency.strip()
-    if not re.match(CONDITION_REGEX, dependency, re.I):
-        raise GeneratorInputError('Invalid dependency %s' % dependency)
-    return dependency
-
-
-def parse_dependencies(inp_str):
-    """
-    Parses dependencies out of inp_str, validates them and returns a
-    list of macros.
-
-    :param inp_str: Input string with macros delimited by ':'.
-    :return: list of dependencies
-    """
-    dependencies = list(map(validate_dependency, inp_str.split(':')))
-    return dependencies
-
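-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing how a ':'-delimited dependency string is
-# turned into a validated list. SOME_MACRO is a placeholder name.
-def _example_parse_dependencies():
-    deps = parse_dependencies('MBEDTLS_ECP_C:!MBEDTLS_FS_IO:SOME_MACRO >= 4')
-    # deps == ['MBEDTLS_ECP_C', '!MBEDTLS_FS_IO', 'SOME_MACRO >= 4']
-    return deps
-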
-
-def parse_suite_dependencies(funcs_f):
-    """
-    Parses test suite dependencies specified at the top of a
-    .function file, in a block that starts with the pattern
-    BEGIN_DEPENDENCIES and ends with END_DEPENDENCIES. Dependencies
-    are specified after the pattern 'depends_on:' and are delimited
-    by ':'.
-
-    :param funcs_f: file object for .function file
-    :return: List of test suite dependencies.
-    """
-    dependencies = []
-    for line in funcs_f:
-        match = re.search(DEPENDENCY_REGEX, line.strip())
-        if match:
-            try:
-                dependencies = parse_dependencies(match.group('dependencies'))
-            except GeneratorInputError as error:
-                raise GeneratorInputError(
-                    str(error) + " - %s:%d" % (funcs_f.name, funcs_f.line_no))
-        if re.search(END_DEP_REGEX, line):
-            break
-    else:
-        raise GeneratorInputError("file: %s - end dependency pattern [%s]"
-                                  " not found!" % (funcs_f.name,
-                                                   END_DEP_REGEX))
-
-    return dependencies
-
-
-def parse_function_dependencies(line):
-    """
-    Parses function dependencies that appear on the same line as the
-    BEGIN_CASE comment. Dependencies are specified after the pattern
-    'depends_on:' and are delimited by ':'.
-
-    :param line: Line from .function file that has dependencies.
-    :return: List of dependencies.
-    """
-    dependencies = []
-    match = re.search(BEGIN_CASE_REGEX, line)
-    dep_str = match.group('depends_on')
-    if dep_str:
-        match = re.search(DEPENDENCY_REGEX, dep_str)
-        if match:
-            dependencies += parse_dependencies(match.group('dependencies'))
-
-    return dependencies
-
-
-ARGUMENT_DECLARATION_REGEX = re.compile(r'(.+?) ?(?:\bconst\b)? ?(\w+)\Z', re.S)
-def parse_function_argument(arg, arg_idx, args, local_vars, args_dispatch):
-    """
-    Parses one test function's argument declaration.
-
-    :param arg: argument declaration.
-    :param arg_idx: current wrapper argument index.
-    :param args: accumulator of arguments' internal types.
-    :param local_vars: accumulator of internal variable declarations.
-    :param args_dispatch: accumulator of argument usage expressions.
-    :return: the number of new wrapper arguments,
-             or None if the argument declaration is invalid.
-    """
-    # Normalize whitespace
-    arg = arg.strip()
-    arg = re.sub(r'\s*\*\s*', r'*', arg)
-    arg = re.sub(r'\s+', r' ', arg)
-    # Extract name and type
-    m = ARGUMENT_DECLARATION_REGEX.search(arg)
-    if not m:
-        # E.g. "int x[42]"
-        return None
-    typ, _ = m.groups()
-    if typ in SIGNED_INTEGER_TYPES:
-        args.append('int')
-        args_dispatch.append('((mbedtls_test_argument_t *) params[%d])->sint' % arg_idx)
-        return 1
-    if typ in STRING_TYPES:
-        args.append('char*')
-        args_dispatch.append('(char *) params[%d]' % arg_idx)
-        return 1
-    if typ in DATA_TYPES:
-        args.append('hex')
-        # create a structure
-        pointer_initializer = '(uint8_t *) params[%d]' % arg_idx
-        len_initializer = '((mbedtls_test_argument_t *) params[%d])->len' % (arg_idx+1)
-        local_vars.append('    data_t data%d = {%s, %s};\n' %
-                          (arg_idx, pointer_initializer, len_initializer))
-        args_dispatch.append('&data%d' % arg_idx)
-        return 2
-    return None
-
-ARGUMENT_LIST_REGEX = re.compile(r'\((.*?)\)', re.S)
-def parse_function_arguments(line):
-    """
-    Parses test function signature for validation and generates
-    a dispatch wrapper function that translates input test vectors
-    read from the data file into test function arguments.
-
-    :param line: Line from .function file that has a function
-                 signature.
-    :return: argument list, local variables for
-             wrapper function and argument dispatch code.
-    """
-    # Process arguments, ex: <type> arg1, <type> arg2 )
-    # This script assumes that the argument list is terminated by ')'
-    # i.e. the test functions will not have a function pointer
-    # argument.
-    m = ARGUMENT_LIST_REGEX.search(line)
-    arg_list = m.group(1).strip()
-    if arg_list in ['', 'void']:
-        return [], '', []
-    args = []
-    local_vars = []
-    args_dispatch = []
-    arg_idx = 0
-    for arg in arg_list.split(','):
-        indexes = parse_function_argument(arg, arg_idx,
-                                          args, local_vars, args_dispatch)
-        if indexes is None:
-            raise ValueError("Test function arguments can only be 'int', "
-                             "'char *' or 'data_t'\n%s" % line)
-        arg_idx += indexes
-
-    return args, ''.join(local_vars), args_dispatch
-
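-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing how a test function signature is mapped to
-# internal argument types and dispatch expressions.
-def _example_parse_function_arguments():
-    args, local_vars, args_dispatch = parse_function_arguments(
-        'void test_example( int x, char *s, data_t *buf )')
-    # args == ['int', 'char*', 'hex']
-    # local_vars declares 'data_t data2' built from params[2] and params[3]
-    # args_dispatch == ['((mbedtls_test_argument_t *) params[0])->sint',
-    #                   '(char *) params[1]', '&data2']
-    return args, local_vars, args_dispatch
-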
-
-def generate_function_code(name, code, local_vars, args_dispatch,
-                           dependencies):
-    """
-    Generate function code with preprocessor checks and parameter dispatch
-    wrapper.
-
-    :param name: Function name
-    :param code: Function code
-    :param local_vars: Local variables for function wrapper
-    :param args_dispatch: Argument dispatch code
-    :param dependencies: Preprocessor dependencies list
-    :return: Final function code
-    """
-    # Add exit label if not present
-    if code.find('exit:') == -1:
-        split_code = code.rsplit('}', 1)
-        if len(split_code) == 2:
-            code = """exit:
-    ;
-}""".join(split_code)
-
-    code += gen_function_wrapper(name, local_vars, args_dispatch)
-    preprocessor_check_start, preprocessor_check_end = \
-        gen_dependencies(dependencies)
-    return preprocessor_check_start + code + preprocessor_check_end
-
-COMMENT_START_REGEX = re.compile(r'/[*/]')
-
-def skip_comments(line, stream):
-    """Remove comments in line.
-
-    If the line contains an unfinished comment, read more lines from stream
-    until the line that contains the comment.
-
-    :return: The original line with inner comments replaced by spaces.
-             Trailing comments and whitespace may be removed completely.
-    """
-    pos = 0
-    while True:
-        opening = COMMENT_START_REGEX.search(line, pos)
-        if not opening:
-            break
-        if line[opening.start(0) + 1] == '/': # //...
-            continuation = line
-            # Count the number of line breaks, to keep line numbers aligned
-            # in the output.
-            line_count = 1
-            while continuation.endswith('\\\n'):
-                # This errors out if the file ends with an unfinished line
-                # comment. That's acceptable to not complicate the code further.
-                continuation = next(stream)
-                line_count += 1
-            return line[:opening.start(0)].rstrip() + '\n' * line_count
-        # Parsing /*...*/, looking for the end
-        closing = line.find('*/', opening.end(0))
-        while closing == -1:
-            # This errors out if the file ends with an unfinished block
-            # comment. That's acceptable to not complicate the code further.
-            line += next(stream)
-            closing = line.find('*/', opening.end(0))
-        pos = closing + 2
-        # Replace inner comment by spaces. There needs to be at least one space
-        # for things like 'int/*ihatespaces*/foo'. Go further and preserve the
-        # width of the comment and line breaks, so that positions in error
-        # messages remain correct.
-        line = (line[:opening.start(0)] +
-                re.sub(r'.', r' ', line[opening.start(0):pos]) +
-                line[pos:])
-    # Strip whitespace at the end of lines (it's irrelevant to error messages).
-    return re.sub(r' +(\n|\Z)', r'\1', line)
-
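-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper demonstrating that skip_comments() blanks out an
-# inner /* ... */ comment while preserving its width, so that column
-# positions in later error messages still line up.
-def _example_skip_comments():
-    cleaned = skip_comments('int /* ignored */ x = 0;\n', iter(()))
-    # cleaned keeps 'int' and 'x = 0;' in their original columns, with the
-    # comment replaced by an equal-width run of spaces.
-    return cleaned
-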
-def parse_function_code(funcs_f, dependencies, suite_dependencies):
-    """
-    Parses out a function from function file object and generates
-    function and dispatch code.
-
-    :param funcs_f: file object of the functions file.
-    :param dependencies: List of dependencies
-    :param suite_dependencies: List of test suite dependencies
-    :return: Function name, arguments, function code and dispatch code.
-    """
-    line_directive = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
-    code = ''
-    has_exit_label = False
-    for line in funcs_f:
-        # Check function signature. Function signature may be split
-        # across multiple lines. Here we try to find the start of
-        # arguments list, then remove '\n's and apply the regex to
-        # detect function start.
-        line = skip_comments(line, funcs_f)
-        up_to_arg_list_start = code + line[:line.find('(') + 1]
-        match = re.match(TEST_FUNCTION_VALIDATION_REGEX,
-                         up_to_arg_list_start.replace('\n', ' '), re.I)
-        if match:
-            # check if we have full signature i.e. split in more lines
-            name = match.group('func_name')
-            if not re.match(FUNCTION_ARG_LIST_END_REGEX, line):
-                for lin in funcs_f:
-                    line += skip_comments(lin, funcs_f)
-                    if re.search(FUNCTION_ARG_LIST_END_REGEX, line):
-                        break
-            args, local_vars, args_dispatch = parse_function_arguments(
-                line)
-            code += line
-            break
-        code += line
-    else:
-        raise GeneratorInputError("file: %s - Test functions not found!" %
-                                  funcs_f.name)
-
-    # Prefix test function name with 'test_'
-    code = code.replace(name, 'test_' + name, 1)
-    name = 'test_' + name
-
-    # If a test function has no arguments then add 'void' argument to
-    # avoid "-Wstrict-prototypes" warnings from clang
-    if len(args) == 0:
-        code = code.replace('()', '(void)', 1)
-
-    for line in funcs_f:
-        if re.search(END_CASE_REGEX, line):
-            break
-        if not has_exit_label:
-            has_exit_label = \
-                re.search(EXIT_LABEL_REGEX, line.strip()) is not None
-        code += line
-    else:
-        raise GeneratorInputError("file: %s - end case pattern [%s] not "
-                                  "found!" % (funcs_f.name, END_CASE_REGEX))
-
-    code = line_directive + code
-    code = generate_function_code(name, code, local_vars, args_dispatch,
-                                  dependencies)
-    dispatch_code = gen_dispatch(name, suite_dependencies + dependencies)
-    return (name, args, code, dispatch_code)
-
-
-def parse_functions(funcs_f):
-    """
-    Parses a test_suite_xxx.function file and returns information
-    for generating a C source file for the test suite.
-
-    :param funcs_f: file object of the functions file.
-    :return: List of test suite dependencies, test function dispatch
-             code, function code and a dict with function identifiers
-             and arguments info.
-    """
-    suite_helpers = ''
-    suite_dependencies = []
-    suite_functions = ''
-    func_info = {}
-    function_idx = 0
-    dispatch_code = ''
-    for line in funcs_f:
-        if re.search(BEGIN_HEADER_REGEX, line):
-            suite_helpers += parse_until_pattern(funcs_f, END_HEADER_REGEX)
-        elif re.search(BEGIN_SUITE_HELPERS_REGEX, line):
-            suite_helpers += parse_until_pattern(funcs_f,
-                                                 END_SUITE_HELPERS_REGEX)
-        elif re.search(BEGIN_DEP_REGEX, line):
-            suite_dependencies += parse_suite_dependencies(funcs_f)
-        elif re.search(BEGIN_CASE_REGEX, line):
-            try:
-                dependencies = parse_function_dependencies(line)
-            except GeneratorInputError as error:
-                raise GeneratorInputError(
-                    "%s:%d: %s" % (funcs_f.name, funcs_f.line_no,
-                                   str(error)))
-            func_name, args, func_code, func_dispatch =\
-                parse_function_code(funcs_f, dependencies, suite_dependencies)
-            suite_functions += func_code
-            # Generate dispatch code and enumeration info
-            if func_name in func_info:
-                raise GeneratorInputError(
-                    "file: %s - function %s re-declared at line %d" %
-                    (funcs_f.name, func_name, funcs_f.line_no))
-            func_info[func_name] = (function_idx, args)
-            dispatch_code += '/* Function Id: %d */\n' % function_idx
-            dispatch_code += func_dispatch
-            function_idx += 1
-
-    func_code = (suite_helpers +
-                 suite_functions).join(gen_dependencies(suite_dependencies))
-    return suite_dependencies, dispatch_code, func_code, func_info
-
-
-def escaped_split(inp_str, split_char):
-    """
-    Split inp_str on the character split_char, ignoring escaped
-    occurrences. Since the return value is used to write back to the
-    intermediate data file, any escape characters in the input are
-    retained in the output.
-
-    :param inp_str: String to split
-    :param split_char: Split character
-    :return: List of splits
-    """
-    if len(split_char) > 1:
-        raise ValueError('Expected split character. Found string!')
-    out = re.sub(r'(\\.)|' + split_char,
-                 lambda m: m.group(1) or '\n', inp_str,
-                 len(inp_str)).split('\n')
-    out = [x for x in out if x]
-    return out
-
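-
-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing that escaped_split() honours '\:' escapes
-# and keeps them verbatim in the output.
-def _example_escaped_split():
-    parts = escaped_split(r'func:"a\:b":2', ':')
-    # parts == ['func', r'"a\:b"', '2']
-    return parts
-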
-
-def parse_test_data(data_f):
-    """
-    Parses the .data file for each test case's name, test function
-    name, test dependencies and test arguments. This information is
-    correlated with the test functions file to generate an
-    intermediate data file in which the strings for test function
-    names, dependencies and integer constant expressions are replaced
-    with identifiers, mainly to optimise space for on-target
-    execution.
-
-    :param data_f: file object of the data file.
-    :return: Generator that yields line number, test name, function name,
-             dependency list and function argument list.
-    """
-    __state_read_name = 0
-    __state_read_args = 1
-    state = __state_read_name
-    dependencies = []
-    name = ''
-    for line in data_f:
-        line = line.strip()
-        # Skip comments
-        if line.startswith('#'):
-            continue
-
-        # Blank line indicates end of test
-        if not line:
-            if state == __state_read_args:
-                raise GeneratorInputError("[%s:%d] Newline before arguments. "
-                                          "Test function and arguments "
-                                          "missing for %s" %
-                                          (data_f.name, data_f.line_no, name))
-            continue
-
-        if state == __state_read_name:
-            # Read test name
-            name = line
-            state = __state_read_args
-        elif state == __state_read_args:
-            # Check dependencies
-            match = re.search(DEPENDENCY_REGEX, line)
-            if match:
-                try:
-                    dependencies = parse_dependencies(
-                        match.group('dependencies'))
-                except GeneratorInputError as error:
-                    raise GeneratorInputError(
-                        str(error) + " - %s:%d" %
-                        (data_f.name, data_f.line_no))
-            else:
-                # Read test vectors
-                parts = escaped_split(line, ':')
-                test_function = parts[0]
-                args = parts[1:]
-                yield data_f.line_no, name, test_function, dependencies, args
-                dependencies = []
-                state = __state_read_name
-    if state == __state_read_args:
-        raise GeneratorInputError("[%s:%d] Newline before arguments. "
-                                  "Test function and arguments missing for "
-                                  "%s" % (data_f.name, data_f.line_no, name))
-
-
-def gen_dep_check(dep_id, dep):
-    """
-    Generate code for checking dependency with the associated
-    identifier.
-
-    :param dep_id: Dependency identifier
-    :param dep: Dependency macro
-    :return: Dependency check code
-    """
-    if dep_id < 0:
-        raise GeneratorInputError("Dependency Id should be a positive "
-                                  "integer.")
-    _not, dep = ('!', dep[1:]) if dep[0] == '!' else ('', dep)
-    if not dep:
-        raise GeneratorInputError("Dependency should not be an empty string.")
-
-    dependency = re.match(CONDITION_REGEX, dep, re.I)
-    if not dependency:
-        raise GeneratorInputError('Invalid dependency %s' % dep)
-
-    _defined = '' if dependency.group(2) else 'defined'
-    _cond = dependency.group(2) if dependency.group(2) else ''
-    _value = dependency.group(3) if dependency.group(3) else ''
-
-    dep_check = '''
-        case {id}:
-            {{
-#if {_not}{_defined}({macro}{_cond}{_value})
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }}
-            break;'''.format(_not=_not, _defined=_defined,
-                             macro=dependency.group(1), id=dep_id,
-                             _cond=_cond, _value=_value)
-    return dep_check
-
-
-def gen_expression_check(exp_id, exp):
-    """
-    Generates code for evaluating an integer expression using
-    associated expression Id.
-
-    :param exp_id: Expression Identifier
-    :param exp: Expression/Macro
-    :return: Expression check code
-    """
-    if exp_id < 0:
-        raise GeneratorInputError("Expression Id should be a positive "
-                                  "integer.")
-    if not exp:
-        raise GeneratorInputError("Expression should not be an empty string.")
-    exp_code = '''
-        case {exp_id}:
-            {{
-                *out_value = {expression};
-            }}
-            break;'''.format(exp_id=exp_id, expression=exp)
-    return exp_code
-
-
-def write_dependencies(out_data_f, test_dependencies, unique_dependencies):
-    """
-    Write dependencies to intermediate test data file, replacing
-    the string form with identifiers. Also, generates dependency
-    check code.
-
-    :param out_data_f: Output intermediate data file
-    :param test_dependencies: Dependencies
-    :param unique_dependencies: Mutable list used to track dependencies
-           that are unique across calls to this function.
-    :return: returns dependency check code.
-    """
-    dep_check_code = ''
-    if test_dependencies:
-        out_data_f.write('depends_on')
-        for dep in test_dependencies:
-            if dep not in unique_dependencies:
-                unique_dependencies.append(dep)
-                dep_id = unique_dependencies.index(dep)
-                dep_check_code += gen_dep_check(dep_id, dep)
-            else:
-                dep_id = unique_dependencies.index(dep)
-            out_data_f.write(':' + str(dep_id))
-        out_data_f.write('\n')
-    return dep_check_code
-
-
-INT_VAL_REGEX = re.compile(r'-?(\d+|0x[0-9a-f]+)$', re.I)
-def val_is_int(val: str) -> bool:
-    """Whether val is suitable as an 'int' parameter in the .datax file."""
-    if not INT_VAL_REGEX.match(val):
-        return False
-    # Limit the range to what is guaranteed to get through strtol()
-    return abs(int(val, 0)) <= 0x7fffffff
-
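-# Illustrative sketch (editor's addition, not part of the original script):
-# a hypothetical helper showing which values val_is_int() lets through as
-# literal 'int' parameters rather than registering as expressions.
-def _example_val_is_int():
-    checks = [val_is_int(v)
-              for v in ('42', '-0x10', 'PSA_SUCCESS', '0x100000000')]
-    # checks == [True, True, False, False]: macro names and values outside
-    # the 32-bit signed range are treated as expressions instead.
-    return checks
-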
-def write_parameters(out_data_f, test_args, func_args, unique_expressions):
-    """
-    Writes test parameters to the intermediate data file, replacing
-    the string form with identifiers. Also, generates expression
-    check code.
-
-    :param out_data_f: Output intermediate data file
-    :param test_args: Test parameters
-    :param func_args: Function arguments
-    :param unique_expressions: Mutable list used to track expressions
-           that are unique across calls to this function.
-    :return: Returns expression check code.
-    """
-    expression_code = ''
-    for i, _ in enumerate(test_args):
-        typ = func_args[i]
-        val = test_args[i]
-
-        # Pass small integer constants literally. This reduces the size of
-        # the C code. Register anything else as an expression.
-        if typ == 'int' and not val_is_int(val):
-            typ = 'exp'
-            if val not in unique_expressions:
-                unique_expressions.append(val)
-                # exp_id could be derived from len(), but for
-                # readability and consistency with the case where the
-                # expression already exists, use index().
-                exp_id = unique_expressions.index(val)
-                expression_code += gen_expression_check(exp_id, val)
-                val = exp_id
-            else:
-                val = unique_expressions.index(val)
-        out_data_f.write(':' + typ + ':' + str(val))
-    out_data_f.write('\n')
-    return expression_code
-
-
-def gen_suite_dep_checks(suite_dependencies, dep_check_code, expression_code):
-    """
-    Generates preprocessor checks for test suite dependencies.
-
-    :param suite_dependencies: Test suite dependencies read from the
-            .function file.
-    :param dep_check_code: Dependency check code
-    :param expression_code: Expression check code
-    :return: Dependency and expression code guarded by test suite
-             dependencies.
-    """
-    if suite_dependencies:
-        preprocessor_check = gen_dependencies_one_line(suite_dependencies)
-        dep_check_code = '''
-{preprocessor_check}
-{code}
-#endif
-'''.format(preprocessor_check=preprocessor_check, code=dep_check_code)
-        expression_code = '''
-{preprocessor_check}
-{code}
-#endif
-'''.format(preprocessor_check=preprocessor_check, code=expression_code)
-    return dep_check_code, expression_code
-
-
-def get_function_info(func_info, function_name, line_no):
-    """Look up information about a test function by name.
-
-    Raise an informative exception if function_name is not found.
-
-    :param func_info: dictionary mapping function names to their information.
-    :param function_name: the function name as written in the .function and
-                          .data files.
-    :param line_no: line number for error messages.
-    :return: Function information (id, args).
-    """
-    test_function_name = 'test_' + function_name
-    if test_function_name not in func_info:
-        raise GeneratorInputError("%d: Function %s not found!" %
-                                  (line_no, test_function_name))
-    return func_info[test_function_name]
-
-
-def gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies):
-    """
-    This function reads test case name, dependencies and test vectors
-    from the .data file. This information is correlated with the test
-    functions file for generating an intermediate data file replacing
-    the strings for test function names, dependencies and integer
-    constant expressions with identifiers. Mainly for optimising
-    space for on-target execution.
-    It also generates test case dependency check code and expression
-    evaluation code.
-
-    :param data_f: Data file object
-    :param out_data_f: Output intermediate data file
-    :param func_info: Dict keyed by function and with function id
-           and arguments info
-    :param suite_dependencies: Test suite dependencies
-    :return: Returns dependency and expression check code
-    """
-    unique_dependencies = []
-    unique_expressions = []
-    dep_check_code = ''
-    expression_code = ''
-    for line_no, test_name, function_name, test_dependencies, test_args in \
-            parse_test_data(data_f):
-        out_data_f.write(test_name + '\n')
-
-        # Write dependencies
-        dep_check_code += write_dependencies(out_data_f, test_dependencies,
-                                             unique_dependencies)
-
-        # Write test function name
-        func_id, func_args = \
-            get_function_info(func_info, function_name, line_no)
-        out_data_f.write(str(func_id))
-
-        # Write parameters
-        if len(test_args) != len(func_args):
-            raise GeneratorInputError("%d: Invalid number of arguments in test "
-                                      "%s. See function %s signature." %
-                                      (line_no, test_name, function_name))
-        expression_code += write_parameters(out_data_f, test_args, func_args,
-                                            unique_expressions)
-
-        # Write a newline as test case separator
-        out_data_f.write('\n')
-
-    dep_check_code, expression_code = gen_suite_dep_checks(
-        suite_dependencies, dep_check_code, expression_code)
-    return dep_check_code, expression_code
-
-
-def add_input_info(funcs_file, data_file, template_file,
-                   c_file, snippets):
-    """
-    Add generator input info to the snippets dictionary.
-
-    :param funcs_file: Functions file name
-    :param data_file: Data file name
-    :param template_file: Template file name
-    :param c_file: Output C file name
-    :param snippets: Dictionary to contain code pieces to be
-                     substituted in the template.
-    :return:
-    """
-    snippets['test_file'] = c_file
-    snippets['test_main_file'] = template_file
-    snippets['test_case_file'] = funcs_file
-    snippets['test_case_data_file'] = data_file
-
-
-def read_code_from_input_files(platform_file, helpers_file,
-                               out_data_file, snippets):
-    """
-    Read code from input files and create substitutions for replacement
-    strings in the template file.
-
-    :param platform_file: Platform file name
-    :param helpers_file: Helper functions file name
-    :param out_data_file: Output intermediate data file name
-    :param snippets: Dictionary to contain code pieces to be
-                     substituted in the template.
-    :return:
-    """
-    # Read helpers
-    with open(helpers_file, 'r') as help_f, open(platform_file, 'r') as \
-            platform_f:
-        snippets['test_common_helper_file'] = helpers_file
-        snippets['test_common_helpers'] = help_f.read()
-        snippets['test_platform_file'] = platform_file
-        snippets['platform_code'] = platform_f.read().replace(
-            'DATA_FILE', out_data_file.replace('\\', '\\\\'))  # escape '\'
-
-
-def write_test_source_file(template_file, c_file, snippets):
-    """
-    Write output source file with generated source code.
-
-    :param template_file: Template file name
-    :param c_file: Output source file
-    :param snippets: Dictionary of generated code snippets
-    :return:
-    """
-
-    # Create a placeholder pattern with the correct named capture groups
-    # to override the default provided with Template.
-    # Match nothing (no way of escaping placeholders).
-    escaped = "(?P<escaped>(?!))"
-    # Match the "__MBEDTLS_TEST_TEMPLATE__PLACEHOLDER_NAME" pattern.
-    named = "__MBEDTLS_TEST_TEMPLATE__(?P<named>[A-Z][_A-Z0-9]*)"
-    # Match nothing (no braced placeholder syntax).
-    braced = "(?P<braced>(?!))"
-    # If not already matched, a "__MBEDTLS_TEST_TEMPLATE__" prefix is invalid.
-    invalid = "(?P<invalid>__MBEDTLS_TEST_TEMPLATE__)"
-    placeholder_pattern = re.compile("|".join([escaped, named, braced, invalid]))
-
-    with open(template_file, 'r') as template_f, open(c_file, 'w') as c_f:
-        for line_no, line in enumerate(template_f.readlines(), 1):
-            # Update line number. +1 as #line directive sets next line number
-            snippets['line_no'] = line_no + 1
-            template = string.Template(line)
-            template.pattern = placeholder_pattern
-            snippets = {k.upper():v for (k, v) in snippets.items()}
-            code = template.substitute(**snippets)
-            c_f.write(code)
-
-
-def parse_function_file(funcs_file, snippets):
-    """
-    Parse function file and generate function dispatch code.
-
-    :param funcs_file: Functions file name
-    :param snippets: Dictionary to contain code pieces to be
-                     substituted in the template.
-    :return:
-    """
-    with FileWrapper(funcs_file) as funcs_f:
-        suite_dependencies, dispatch_code, func_code, func_info = \
-            parse_functions(funcs_f)
-        snippets['functions_code'] = func_code
-        snippets['dispatch_code'] = dispatch_code
-        return suite_dependencies, func_info
-
-
-def generate_intermediate_data_file(data_file, out_data_file,
-                                    suite_dependencies, func_info, snippets):
-    """
-    Generates intermediate data file from input data file and
-    information read from functions file.
-
-    :param data_file: Data file name
-    :param out_data_file: Output/Intermediate data file
-    :param suite_dependencies: List of suite dependencies.
-    :param func_info: Function info parsed from functions file.
-    :param snippets: Dictionary to contain code pieces to be
-                     substituted in the template.
-    :return:
-    """
-    with FileWrapper(data_file) as data_f, \
-            open(out_data_file, 'w') as out_data_f:
-        dep_check_code, expression_code = gen_from_test_data(
-            data_f, out_data_f, func_info, suite_dependencies)
-        snippets['dep_check_code'] = dep_check_code
-        snippets['expression_code'] = expression_code
-
-
-def generate_code(**input_info):
-    """
-    Generates C source code from test suite file, data file, common
-    helpers file and platform file.
-
-    input_info expands to the following parameters:
-    funcs_file: Functions file name
-    data_file: Data file name
-    template_file: Template file name
-    platform_file: Platform file name
-    helpers_file: Helper functions file name
-    suites_dir: Test suites directory
-    c_file: Output C file name
-    out_data_file: Output intermediate data file name
-    :return:
-    """
-    funcs_file = input_info['funcs_file']
-    data_file = input_info['data_file']
-    template_file = input_info['template_file']
-    platform_file = input_info['platform_file']
-    helpers_file = input_info['helpers_file']
-    suites_dir = input_info['suites_dir']
-    c_file = input_info['c_file']
-    out_data_file = input_info['out_data_file']
-    for name, path in [('Functions file', funcs_file),
-                       ('Data file', data_file),
-                       ('Template file', template_file),
-                       ('Platform file', platform_file),
-                       ('Helpers code file', helpers_file),
-                       ('Suites dir', suites_dir)]:
-        if not os.path.exists(path):
-            raise IOError("ERROR: %s [%s] not found!" % (name, path))
-
-    snippets = {'generator_script': os.path.basename(__file__)}
-    read_code_from_input_files(platform_file, helpers_file,
-                               out_data_file, snippets)
-    add_input_info(funcs_file, data_file, template_file,
-                   c_file, snippets)
-    suite_dependencies, func_info = parse_function_file(funcs_file, snippets)
-    generate_intermediate_data_file(data_file, out_data_file,
-                                    suite_dependencies, func_info, snippets)
-    write_test_source_file(template_file, c_file, snippets)
-
-
-def main():
-    """
-    Command-line entry point: parse arguments and generate the test suite code.
-
-    :return:
-    """
-    parser = argparse.ArgumentParser(
-        description='Dynamically generate test suite code.')
-
-    parser.add_argument("-f", "--functions-file",
-                        dest="funcs_file",
-                        help="Functions file",
-                        metavar="FUNCTIONS_FILE",
-                        required=True)
-
-    parser.add_argument("-d", "--data-file",
-                        dest="data_file",
-                        help="Data file",
-                        metavar="DATA_FILE",
-                        required=True)
-
-    parser.add_argument("-t", "--template-file",
-                        dest="template_file",
-                        help="Template file",
-                        metavar="TEMPLATE_FILE",
-                        required=True)
-
-    parser.add_argument("-s", "--suites-dir",
-                        dest="suites_dir",
-                        help="Suites dir",
-                        metavar="SUITES_DIR",
-                        required=True)
-
-    parser.add_argument("--helpers-file",
-                        dest="helpers_file",
-                        help="Helpers file",
-                        metavar="HELPERS_FILE",
-                        required=True)
-
-    parser.add_argument("-p", "--platform-file",
-                        dest="platform_file",
-                        help="Platform code file",
-                        metavar="PLATFORM_FILE",
-                        required=True)
-
-    parser.add_argument("-o", "--out-dir",
-                        dest="out_dir",
-                        help="Dir where generated code and scripts are copied",
-                        metavar="OUT_DIR",
-                        required=True)
-
-    args = parser.parse_args()
-
-    data_file_name = os.path.basename(args.data_file)
-    data_name = os.path.splitext(data_file_name)[0]
-
-    out_c_file = os.path.join(args.out_dir, data_name + '.c')
-    out_data_file = os.path.join(args.out_dir, data_name + '.datax')
-
-    out_c_file_dir = os.path.dirname(out_c_file)
-    out_data_file_dir = os.path.dirname(out_data_file)
-    for directory in [out_c_file_dir, out_data_file_dir]:
-        if not os.path.exists(directory):
-            os.makedirs(directory)
-
-    generate_code(funcs_file=args.funcs_file, data_file=args.data_file,
-                  template_file=args.template_file,
-                  platform_file=args.platform_file,
-                  helpers_file=args.helpers_file, suites_dir=args.suites_dir,
-                  c_file=out_c_file, out_data_file=out_data_file)
-
-
-if __name__ == "__main__":
-    try:
-        main()
-    except GeneratorInputError as err:
-        sys.exit("%s: input error: %s" %
-                 (os.path.basename(sys.argv[0]), str(err)))
diff --git a/tests/scripts/generate_test_keys.py b/tests/scripts/generate_test_keys.py
deleted file mode 100755
index 9946c24..0000000
--- a/tests/scripts/generate_test_keys.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-"""Module generating EC and RSA keys to be used in test_suite_pk instead of
-generating the required key at run time. This helps speeding up testing."""
-
-from typing import Iterator, List, Tuple
-import re
-import argparse
-import scripts_path # pylint: disable=unused-import
-from mbedtls_framework.asymmetric_key_data import ASYMMETRIC_KEY_DATA
-from mbedtls_framework.build_tree import guess_project_root
-
-BYTES_PER_LINE = 16
-
-def c_byte_array_literal_content(array_name: str, key_data: bytes) -> Iterator[str]:
-    yield 'const unsigned char '
-    yield array_name
-    yield '[] = {'
-    for index in range(0, len(key_data), BYTES_PER_LINE):
-        yield '\n   '
-        for b in key_data[index:index + BYTES_PER_LINE]:
-            yield ' {:#04x},'.format(b)
-    yield '\n};'
-
-def convert_der_to_c(array_name: str, key_data: bytes) -> str:
-    return ''.join(c_byte_array_literal_content(array_name, key_data))
-
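-# Illustrative sketch (editor's addition, not part of the original file):
-# a hypothetical helper showing the C array literal emitted by
-# convert_der_to_c() for a tiny byte string.
-def _example_convert_der_to_c() -> str:
-    literal = convert_der_to_c('test_example_priv', b'\x30\x82')
-    # literal == 'const unsigned char test_example_priv[] = {\n'
-    #            '    0x30, 0x82,\n};'
-    return literal
-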
-def get_key_type(key: str) -> str:
-    if re.match('PSA_KEY_TYPE_RSA_.*', key):
-        return "rsa"
-    elif re.match('PSA_KEY_TYPE_ECC_.*', key):
-        return "ec"
-    else:
-        print("Unhandled key type {}".format(key))
-        return "unknown"
-
-def get_ec_key_family(key: str) -> str:
-    match = re.search(r'.*\((.*)\)', key)
-    if match is None:
-        raise Exception("Unable to get EC family from {}".format(key))
-    return match.group(1)
-
-# Legacy EC group IDs do not support all the key types that PSA does, so the
-# following dictionary is used for:
-# - getting the prefix/suffix for legacy curve names
-# - determining whether the curve is supported by legacy symbols (MBEDTLS_ECP_DP_...)
-EC_NAME_CONVERSION = {
-    'PSA_ECC_FAMILY_SECP_K1': {
-        192: ('secp', 'k1'),
-        224: ('secp', 'k1'),
-        256: ('secp', 'k1')
-    },
-    'PSA_ECC_FAMILY_SECP_R1': {
-        192: ('secp', 'r1'),
-        224: ('secp', 'r1'),
-        256: ('secp', 'r1'),
-        384: ('secp', 'r1'),
-        521: ('secp', 'r1')
-    },
-    'PSA_ECC_FAMILY_BRAINPOOL_P_R1': {
-        256: ('bp', 'r1'),
-        384: ('bp', 'r1'),
-        512: ('bp', 'r1')
-    },
-    'PSA_ECC_FAMILY_MONTGOMERY': {
-        255: ('curve', '19'),
-        448: ('curve', '')
-    }
-}
-
-def get_ec_curve_name(priv_key: str, bits: int) -> str:
-    ec_family = get_ec_key_family(priv_key)
-    try:
-        prefix = EC_NAME_CONVERSION[ec_family][bits][0]
-        suffix = EC_NAME_CONVERSION[ec_family][bits][1]
-    except KeyError:
-        return ""
-    return prefix + str(bits) + suffix
-
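-# Illustrative sketch (editor's addition, not part of the original file):
-# a hypothetical helper showing how a PSA key-pair identifier and a bit
-# size map to a legacy curve name ('' is returned when no legacy symbol
-# exists for the curve).
-def _example_get_ec_curve_name() -> str:
-    name = get_ec_curve_name(
-        'PSA_KEY_TYPE_ECC_KEY_PAIR(PSA_ECC_FAMILY_SECP_R1)', 256)
-    # name == 'secp256r1'
-    return name
-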
-def get_look_up_table_entry(key_type: str, group_id_or_keybits: str,
-                            priv_array_name: str, pub_array_name: str) -> Iterator[str]:
-    if key_type == "ec":
-        yield "    {{ {}, 0,\n".format(group_id_or_keybits)
-    else:
-        yield "    {{ 0, {},\n".format(group_id_or_keybits)
-    yield "      {0}, sizeof({0}),\n".format(priv_array_name)
-    yield "      {0}, sizeof({0}) }},".format(pub_array_name)
-
-
-def write_output_file(output_file_name: str, arrays: str, look_up_table: str):
-    with open(output_file_name, 'wt') as output:
-        output.write("""\
-/*********************************************************************************
- * This file was automatically generated from tests/scripts/generate_test_keys.py.
- * Please do not edit it manually.
- *********************************************************************************/
-""")
-        output.write(arrays)
-        output.write("""
-struct predefined_key_element {{
-    int group_id;  // EC group ID; 0 for RSA keys
-    int keybits;  // bit size of RSA key; 0 for EC keys
-    const unsigned char *priv_key;
-    size_t priv_key_len;
-    const unsigned char *pub_key;
-    size_t pub_key_len;
-}};
-
-struct predefined_key_element predefined_keys[] = {{
-{}
-}};
-
-/* End of generated file */
-""".format(look_up_table))
-
-def collect_keys() -> Tuple[str, str]:
-    """"
-    This function reads key data from ASYMMETRIC_KEY_DATA and, only for the
-    keys supported in legacy ECP/RSA modules, it returns 2 strings:
-    - the 1st contains C arrays declaration of these keys and
-    - the 2nd contains the final look-up table for all these arrays.
-    """
-    arrays = []
-    look_up_table = []
-
-    # Get a list of private keys only in order to get a single item for every
-    # (key type, key bits) pair. We know that ASYMMETRIC_KEY_DATA
-    # also contains the public counterpart.
-    priv_keys = [key for key in ASYMMETRIC_KEY_DATA if '_KEY_PAIR' in key]
-    priv_keys = sorted(priv_keys)
-
-    for priv_key in priv_keys:
-        key_type = get_key_type(priv_key)
-        # Ignore keys which are not EC or RSA
-        if key_type == "unknown":
-            continue
-
-        pub_key = re.sub('_KEY_PAIR', '_PUBLIC_KEY', priv_key)
-
-        for bits in ASYMMETRIC_KEY_DATA[priv_key]:
-            if key_type == "ec":
-                curve = get_ec_curve_name(priv_key, bits)
-                # Ignore EC curves unsupported in legacy symbols
-                if curve == "":
-                    continue
-            # Create output array name
-            if key_type == "rsa":
-                array_name_base = "_".join(["test", key_type, str(bits)])
-            else:
-                array_name_base = "_".join(["test", key_type, curve])
-            array_name_priv = array_name_base + "_priv"
-            array_name_pub = array_name_base + "_pub"
-            # Convert bytearray to C array
-            c_array_priv = convert_der_to_c(array_name_priv, ASYMMETRIC_KEY_DATA[priv_key][bits])
-            c_array_pub = convert_der_to_c(array_name_pub, ASYMMETRIC_KEY_DATA[pub_key][bits])
-            # Write the C array to the output file
-            arrays.append(''.join(["\n", c_array_priv, "\n", c_array_pub, "\n"]))
-            # Update the lookup table
-            if key_type == "ec":
-                group_id_or_keybits = "MBEDTLS_ECP_DP_" + curve.upper()
-            else:
-                group_id_or_keybits = str(bits)
-            look_up_table.append(''.join(get_look_up_table_entry(key_type, group_id_or_keybits,
-                                                                 array_name_priv, array_name_pub)))
-
-    return ''.join(arrays), '\n'.join(look_up_table)
-
-def main() -> None:
-    default_output_path = guess_project_root() + "/tests/src/test_keys.h"
-
-    argparser = argparse.ArgumentParser()
-    argparser.add_argument("--output", help="Output file", default=default_output_path)
-    args = argparser.parse_args()
-
-    output_file = args.output
-
-    arrays, look_up_table = collect_keys()
-
-    write_output_file(output_file, arrays, look_up_table)
-
-if __name__ == '__main__':
-    main()
diff --git a/tests/scripts/test_generate_test_code.py b/tests/scripts/test_generate_test_code.py
deleted file mode 100755
index abc46a7..0000000
--- a/tests/scripts/test_generate_test_code.py
+++ /dev/null
@@ -1,1915 +0,0 @@
-#!/usr/bin/env python3
-# Unit test for generate_test_code.py
-#
-# Copyright The Mbed TLS Contributors
-# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
-
-"""
-Unit tests for generate_test_code.py
-"""
-
-from io import StringIO
-from unittest import TestCase, main as unittest_main
-from unittest.mock import patch
-
-from generate_test_code import gen_dependencies, gen_dependencies_one_line
-from generate_test_code import gen_function_wrapper, gen_dispatch
-from generate_test_code import parse_until_pattern, GeneratorInputError
-from generate_test_code import parse_suite_dependencies
-from generate_test_code import parse_function_dependencies
-from generate_test_code import parse_function_arguments, parse_function_code
-from generate_test_code import parse_functions, END_HEADER_REGEX
-from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split
-from generate_test_code import parse_test_data, gen_dep_check
-from generate_test_code import gen_expression_check, write_dependencies
-from generate_test_code import write_parameters, gen_suite_dep_checks
-from generate_test_code import gen_from_test_data
-
-
-class GenDep(TestCase):
-    """
-    Test suite for gen_dependencies()
-    """
-
-    def test_dependencies_list(self):
-        """
-        Test that gen_dependencies() correctly creates guards for a
-        list of positive dependencies.
-        :return:
-        """
-        dependencies = ['DEP1', 'DEP2']
-        dep_start, dep_end = gen_dependencies(dependencies)
-        preprocessor1, preprocessor2 = dep_start.splitlines()
-        endif1, endif2 = dep_end.splitlines()
-        self.assertEqual(preprocessor1, '#if defined(DEP1)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(preprocessor2, '#if defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif1, '#endif /* DEP2 */',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif2, '#endif /* DEP1 */',
-                         'Preprocessor generated incorrectly')
-
-    def test_disabled_dependencies_list(self):
-        """
-        Test that gen_dependencies() correctly creates guards for a
-        list of negated dependencies.
-        :return:
-        """
-        dependencies = ['!DEP1', '!DEP2']
-        dep_start, dep_end = gen_dependencies(dependencies)
-        preprocessor1, preprocessor2 = dep_start.splitlines()
-        endif1, endif2 = dep_end.splitlines()
-        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(preprocessor2, '#if !defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif1, '#endif /* !DEP2 */',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif2, '#endif /* !DEP1 */',
-                         'Preprocessor generated incorrectly')
-
-    def test_mixed_dependencies_list(self):
-        """
-        Test that gen_dependencies() correctly creates guards for a
-        mix of positive and negated dependencies.
-        :return:
-        """
-        dependencies = ['!DEP1', 'DEP2']
-        dep_start, dep_end = gen_dependencies(dependencies)
-        preprocessor1, preprocessor2 = dep_start.splitlines()
-        endif1, endif2 = dep_end.splitlines()
-        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(preprocessor2, '#if defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif1, '#endif /* DEP2 */',
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(endif2, '#endif /* !DEP1 */',
-                         'Preprocessor generated incorrectly')
-
-    def test_empty_dependencies_list(self):
-        """
-        Test that gen_dependencies() returns empty strings for an
-        empty dependency list.
-        :return:
-        """
-        dependencies = []
-        dep_start, dep_end = gen_dependencies(dependencies)
-        self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly')
-        self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly')
-
-    def test_large_dependencies_list(self):
-        """
-        Test that gen_dependencies() correctly creates guards for a
-        large dependency list.
-        :return:
-        """
-        dependencies = []
-        count = 10
-        for i in range(count):
-            dependencies.append('DEP%d' % i)
-        dep_start, dep_end = gen_dependencies(dependencies)
-        self.assertEqual(len(dep_start.splitlines()), count,
-                         'Preprocessor generated incorrectly')
-        self.assertEqual(len(dep_end.splitlines()), count,
-                         'Preprocessor generated incorrectly')
-
-
-class GenDepOneLine(TestCase):
-    """
-    Test Suite for testing gen_dependencies_one_line()
-    """
-
-    def test_dependencies_list(self):
-        """
-        Test that gen_dependencies_one_line() creates a single-line
-        guard for a list of positive dependencies.
-        :return:
-        """
-        dependencies = ['DEP1', 'DEP2']
-        dep_str = gen_dependencies_one_line(dependencies)
-        self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-
-    def test_disabled_dependencies_list(self):
-        """
-        Test that gen_dependencies_one_line() creates a single-line
-        guard for a list of negated dependencies.
-        :return:
-        """
-        dependencies = ['!DEP1', '!DEP2']
-        dep_str = gen_dependencies_one_line(dependencies)
-        self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-
-    def test_mixed_dependencies_list(self):
-        """
-        Test that gen_dependencies_one_line() creates a single-line
-        guard for a mix of positive and negated dependencies.
-        :return:
-        """
-        dependencies = ['!DEP1', 'DEP2']
-        dep_str = gen_dependencies_one_line(dependencies)
-        self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)',
-                         'Preprocessor generated incorrectly')
-
-    def test_empty_dependencies_list(self):
-        """
-        Test that gen_dependencies_one_line() returns an empty string
-        for an empty dependency list.
-        :return:
-        """
-        dependencies = []
-        dep_str = gen_dependencies_one_line(dependencies)
-        self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly')
-
-    def test_large_dependencies_list(self):
-        """
-        Test that gen_dependencies_one_line() creates a single-line
-        guard for a large dependency list.
-        :return:
-        """
-        dependencies = []
-        count = 10
-        for i in range(count):
-            dependencies.append('DEP%d' % i)
-        dep_str = gen_dependencies_one_line(dependencies)
-        expected = '#if ' + ' && '.join(['defined(%s)' %
-                                         x for x in dependencies])
-        self.assertEqual(dep_str, expected,
-                         'Preprocessor generated incorrectly')
-
-
-class GenFunctionWrapper(TestCase):
-    """
-    Test Suite for testing gen_function_wrapper()
-    """
-
-    def test_params_unpack(self):
-        """
-        Test that params are properly unpacked in the function call.
-
-        :return:
-        """
-        code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd'))
-        expected = '''
-void test_a_wrapper( void ** params )
-{
-
-    test_a( a, b, c, d );
-}
-'''
-        self.assertEqual(code, expected)
-
-    def test_local(self):
-        """
-        Test that params are properly unpacked in the function call.
-
-        :return:
-        """
-        code = gen_function_wrapper('test_a',
-                                    'int x = 1;', ('x', 'b', 'c', 'd'))
-        expected = '''
-void test_a_wrapper( void ** params )
-{
-int x = 1;
-    test_a( x, b, c, d );
-}
-'''
-        self.assertEqual(code, expected)
-
-    def test_empty_params(self):
-        """
-        Test that params are properly unpacked in the function call.
-
-        :return:
-        """
-        code = gen_function_wrapper('test_a', '', ())
-        expected = '''
-void test_a_wrapper( void ** params )
-{
-    (void)params;
-
-    test_a(  );
-}
-'''
-        self.assertEqual(code, expected)
-
-
-class GenDispatch(TestCase):
-    """
-    Test suite for testing gen_dispatch()
-    """
-
-    def test_dispatch(self):
-        """
-        Test that dispatch table entry is generated correctly.
-        :return:
-        """
-        code = gen_dispatch('test_a', ['DEP1', 'DEP2'])
-        expected = '''
-#if defined(DEP1) && defined(DEP2)
-    test_a_wrapper,
-#else
-    NULL,
-#endif
-'''
-        self.assertEqual(code, expected)
-
-    def test_empty_dependencies(self):
-        """
-        Test empty dependency list.
-        :return:
-        """
-        code = gen_dispatch('test_a', [])
-        expected = '''
-    test_a_wrapper,
-'''
-        self.assertEqual(code, expected)
-
-
-class StringIOWrapper(StringIO):
-    """
-    File-like class to mock a file object in tests.
-    """
-    def __init__(self, file_name, data, line_no=0):
-        """
-        Init file handle.
-
-        :param file_name: Name reported for the mocked file.
-        :param data: File content.
-        :param line_no: Initial line number.
-        """
-        super(StringIOWrapper, self).__init__(data)
-        self.line_no = line_no
-        self.name = file_name
-
-    def next(self):
-        """
-        Iterator method. Delegates to the base class's __next__()
-        without any additional processing.
-
-        :return: Line read from file.
-        """
-        parent = super(StringIOWrapper, self)
-        line = parent.__next__()
-        return line
-
-    def readline(self, _length=0):
-        """
-        Wrap the base class readline and count the line number as
-        each line is read.
-
-        :param _length: Ignored.
-        :return:
-        """
-        line = super(StringIOWrapper, self).readline()
-        if line is not None:
-            self.line_no += 1
-        return line
-
-
-class ParseUntilPattern(TestCase):
-    """
-    Test Suite for testing parse_until_pattern().
-    """
-
-    def test_suite_headers(self):
-        """
-        Test that suite headers are parsed correctly.
-
-        :return:
-        """
-        data = '''#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-/* END_HEADER */
-'''
-        expected = '''#line 1 "test_suite_ut.function"
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data, line_no=0)
-        headers = parse_until_pattern(stream, END_HEADER_REGEX)
-        self.assertEqual(headers, expected)
-
-    def test_line_no(self):
-        """
-        Test that #line is set to correct line no. in source .function file.
-
-        :return:
-        """
-        data = '''#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-/* END_HEADER */
-'''
-        offset_line_no = 5
-        expected = '''#line %d "test_suite_ut.function"
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-''' % (offset_line_no + 1)
-        stream = StringIOWrapper('test_suite_ut.function', data,
-                                 offset_line_no)
-        headers = parse_until_pattern(stream, END_HEADER_REGEX)
-        self.assertEqual(headers, expected)
-
-    def test_no_end_header_comment(self):
-        """
-        Test that GeneratorInputError is raised when the end header comment
-        is missing.
-        :return:
-        """
-        data = '''#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(GeneratorInputError, parse_until_pattern, stream,
-                          END_HEADER_REGEX)
-
-
-class ParseSuiteDependencies(TestCase):
-    """
-    Test Suite for testing parse_suite_dependencies().
-    """
-
-    def test_suite_dependencies(self):
-        """
-        Test that suite dependencies are parsed correctly.
-
-        :return:
-        """
-        data = '''
- * depends_on:MBEDTLS_ECP_C
- * END_DEPENDENCIES
- */
-'''
-        expected = ['MBEDTLS_ECP_C']
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        dependencies = parse_suite_dependencies(stream)
-        self.assertEqual(dependencies, expected)
-
-    def test_no_end_dep_comment(self):
-        """
-        Test that GeneratorInputError is raised when the end dependency
-        comment is missing.
-        :return:
-        """
-        data = '''
-* depends_on:MBEDTLS_ECP_C
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(GeneratorInputError, parse_suite_dependencies,
-                          stream)
-
-    def test_dependencies_split(self):
-        """
-        Test that dependencies are split on ':' and whitespace is stripped.
-        :return:
-        """
-        data = '''
- * depends_on:MBEDTLS_ECP_C:A:B:   C  : D :F : G: !H
- * END_DEPENDENCIES
- */
-'''
-        expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        dependencies = parse_suite_dependencies(stream)
-        self.assertEqual(dependencies, expected)
-
-
-class ParseFuncDependencies(TestCase):
-    """
-    Test Suite for testing parse_function_dependencies()
-    """
-
-    def test_function_dependencies(self):
-        """
-        Test that parse_function_dependencies() correctly parses function
-        dependencies.
-        :return:
-        """
-        line = '/* BEGIN_CASE ' \
-               'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
-        expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
-        dependencies = parse_function_dependencies(line)
-        self.assertEqual(dependencies, expected)
-
-    def test_no_dependencies(self):
-        """
-        Test that parse_function_dependencies() returns an empty list when
-        no dependencies are specified.
-        :return:
-        """
-        line = '/* BEGIN_CASE */'
-        dependencies = parse_function_dependencies(line)
-        self.assertEqual(dependencies, [])
-
-    def test_tolerance(self):
-        """
-        Test that parse_function_dependencies() tolerates whitespace around
-        dependency names.
-        :return:
-        """
-        line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/'
-        dependencies = parse_function_dependencies(line)
-        self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F'])
-
-
-class ParseFuncSignature(TestCase):
-    """
-    Test Suite for parse_function_arguments().
-    """
-
-    def test_int_and_char_params(self):
-        """
-        Test int and char parameters parsing
-        :return:
-        """
-        line = 'void entropy_threshold( char * a, int b, int result )'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, ['char*', 'int', 'int'])
-        self.assertEqual(local, '')
-        self.assertEqual(arg_dispatch,
-                         ['(char *) params[0]',
-                          '((mbedtls_test_argument_t *) params[1])->sint',
-                          '((mbedtls_test_argument_t *) params[2])->sint'])
-
-    def test_hex_params(self):
-        """
-        Test hex parameters parsing
-        :return:
-        """
-        line = 'void entropy_threshold( char * a, data_t * h, int result )'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, ['char*', 'hex', 'int'])
-        self.assertEqual(local,
-                         '    data_t data1 = {(uint8_t *) params[1], '
-                         '((mbedtls_test_argument_t *) params[2])->len};\n')
-        self.assertEqual(arg_dispatch, ['(char *) params[0]',
-                                        '&data1',
-                                        '((mbedtls_test_argument_t *) params[3])->sint'])
-
-    def test_unsupported_arg(self):
-        """
-        Test unsupported argument type
-        :return:
-        """
-        line = 'void entropy_threshold( char * a, data_t * h, unknown_t result )'
-        self.assertRaises(ValueError, parse_function_arguments, line)
-
-    def test_empty_params(self):
-        """
-        Test no parameters (nothing between parentheses).
-        :return:
-        """
-        line = 'void entropy_threshold()'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, [])
-        self.assertEqual(local, '')
-        self.assertEqual(arg_dispatch, [])
-
-    def test_blank_params(self):
-        """
-        Test no parameters (space between parentheses).
-        :return:
-        """
-        line = 'void entropy_threshold( )'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, [])
-        self.assertEqual(local, '')
-        self.assertEqual(arg_dispatch, [])
-
-    def test_void_params(self):
-        """
-        Test no parameters (void keyword).
-        :return:
-        """
-        line = 'void entropy_threshold(void)'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, [])
-        self.assertEqual(local, '')
-        self.assertEqual(arg_dispatch, [])
-
-    def test_void_space_params(self):
-        """
-        Test no parameters (void with spaces).
-        :return:
-        """
-        line = 'void entropy_threshold( void )'
-        args, local, arg_dispatch = parse_function_arguments(line)
-        self.assertEqual(args, [])
-        self.assertEqual(local, '')
-        self.assertEqual(arg_dispatch, [])
-
-
-class ParseFunctionCode(TestCase):
-    """
-    Test suite for testing parse_function_code()
-    """
-
-    def test_no_function(self):
-        """
-        Test no test function found.
-        :return:
-        """
-        data = '''
-No
-test
-function
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        err_msg = 'file: test_suite_ut.function - Test functions not found!'
-        self.assertRaisesRegex(GeneratorInputError, err_msg,
-                               parse_function_code, stream, [], [])
-
-    def test_no_end_case_comment(self):
-        """
-        Test missing end case.
-        :return:
-        """
-        data = '''
-void test_func()
-{
-}
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        err_msg = r'file: test_suite_ut.function - '\
-                  'end case pattern .*? not found!'
-        self.assertRaisesRegex(GeneratorInputError, err_msg,
-                               parse_function_code, stream, [], [])
-
-    @patch("generate_test_code.parse_function_arguments")
-    def test_function_called(self,
-                             parse_function_arguments_mock):
-        """
-        Test that parse_function_arguments() is called with the
-        function signature line.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        data = '''
-void test_func()
-{
-}
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(GeneratorInputError, parse_function_code,
-                          stream, [], [])
-        self.assertTrue(parse_function_arguments_mock.called)
-        parse_function_arguments_mock.assert_called_with('void test_func()\n')
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_return(self, parse_function_arguments_mock,
-                    gen_function_wrapper_mock,
-                    gen_dependencies_mock,
-                    gen_dispatch_mock):
-        """
-        Test generated code.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-void func()
-{
-    ba ba black sheep
-    have you any wool
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        name, arg, code, dispatch_code = parse_function_code(stream, [], [])
-
-        self.assertTrue(parse_function_arguments_mock.called)
-        parse_function_arguments_mock.assert_called_with('void func()\n')
-        gen_function_wrapper_mock.assert_called_with('test_func', '', [])
-        self.assertEqual(name, 'test_func')
-        self.assertEqual(arg, [])
-        expected = '''#line 1 "test_suite_ut.function"
-
-void test_func(void)
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    ;
-}
-'''
-        self.assertEqual(code, expected)
-        self.assertEqual(dispatch_code, "\n    test_func_wrapper,\n")
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_with_exit_label(self, parse_function_arguments_mock,
-                             gen_function_wrapper_mock,
-                             gen_dependencies_mock,
-                             gen_dispatch_mock):
-        """
-        Test when exit label is present.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-void func()
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-void test_func(void)
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-    def test_non_void_function(self):
-        """
-        Test invalid signature (non void).
-        :return:
-        """
-        data = 'int entropy_threshold( char * a, data_t * h, int result )'
-        err_msg = 'file: test_suite_ut.function - Test functions not found!'
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaisesRegex(GeneratorInputError, err_msg,
-                               parse_function_code, stream, [], [])
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_function_name_on_newline(self, parse_function_arguments_mock,
-                                      gen_function_wrapper_mock,
-                                      gen_dependencies_mock,
-                                      gen_dispatch_mock):
-        """
-        Test with line break before the function name.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-void
-
-
-func()
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-void
-
-
-test_func(void)
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_case_starting_with_comment(self, parse_function_arguments_mock,
-                                        gen_function_wrapper_mock,
-                                        gen_dependencies_mock,
-                                        gen_dispatch_mock):
-        """
-        Test with comments before the function signature.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''/* comment */
-/* more
- * comment */
-// this is\\
-still \\
-a comment
-void func()
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-
-
-
-
-
-void test_func(void)
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_comment_in_prototype(self, parse_function_arguments_mock,
-                                  gen_function_wrapper_mock,
-                                  gen_dependencies_mock,
-                                  gen_dispatch_mock):
-        """
-        Test with comments in the function prototype.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-void func( int x, // (line \\
-                     comment)
-           int y /* lone closing parenthesis) */ )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-void test_func( int x,
-
-           int y                                 )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_line_comment_in_block_comment(self, parse_function_arguments_mock,
-                                           gen_function_wrapper_mock,
-                                           gen_dependencies_mock,
-                                           gen_dispatch_mock):
-        """
-        Test with line comment in block comment.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-void func( int x /* // */ )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-void test_func( int x          )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-    @patch("generate_test_code.gen_dispatch")
-    @patch("generate_test_code.gen_dependencies")
-    @patch("generate_test_code.gen_function_wrapper")
-    @patch("generate_test_code.parse_function_arguments")
-    def test_block_comment_in_line_comment(self, parse_function_arguments_mock,
-                                           gen_function_wrapper_mock,
-                                           gen_dependencies_mock,
-                                           gen_dispatch_mock):
-        """
-        Test with block comment in line comment.
-        :return:
-        """
-        parse_function_arguments_mock.return_value = ([], '', [])
-        gen_function_wrapper_mock.return_value = ''
-        gen_dependencies_mock.side_effect = gen_dependencies
-        gen_dispatch_mock.side_effect = gen_dispatch
-        data = '''
-// /*
-void func( int x )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        _, _, code, _ = parse_function_code(stream, [], [])
-
-        expected = '''#line 1 "test_suite_ut.function"
-
-
-void test_func( int x )
-{
-    ba ba black sheep
-    have you any wool
-exit:
-    yes sir yes sir
-    3 bags full
-}
-'''
-        self.assertEqual(code, expected)
-
-
-class ParseFunction(TestCase):
-    """
-    Test Suite for testing parse_functions()
-    """
-
-    @patch("generate_test_code.parse_until_pattern")
-    def test_begin_header(self, parse_until_pattern_mock):
-        """
-        Test that begin header is checked and parse_until_pattern() is called.
-        :return:
-        """
-        def stop(*_unused):
-            """Stop when parse_until_pattern is called."""
-            raise Exception
-        parse_until_pattern_mock.side_effect = stop
-        data = '''/* BEGIN_HEADER */
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-/* END_HEADER */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(Exception, parse_functions, stream)
-        parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
-        self.assertEqual(stream.line_no, 1)
-
-    @patch("generate_test_code.parse_until_pattern")
-    def test_begin_helper(self, parse_until_pattern_mock):
-        """
-        Test that begin helper is checked and parse_until_pattern() is called.
-        :return:
-        """
-        def stop(*_unused):
-            """Stop when parse_until_pattern is called."""
-            raise Exception
-        parse_until_pattern_mock.side_effect = stop
-        data = '''/* BEGIN_SUITE_HELPERS */
-void print_hello_world()
-{
-    printf("Hello World!\n");
-}
-/* END_SUITE_HELPERS */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(Exception, parse_functions, stream)
-        parse_until_pattern_mock.assert_called_with(stream,
-                                                    END_SUITE_HELPERS_REGEX)
-        self.assertEqual(stream.line_no, 1)
-
-    @patch("generate_test_code.parse_suite_dependencies")
-    def test_begin_dep(self, parse_suite_dependencies_mock):
-        """
-        Test that begin dep is checked and parse_suite_dependencies() is
-        called.
-        :return:
-        """
-        def stop(*_unused):
-            """Stop when parse_until_pattern is called."""
-            raise Exception
-        parse_suite_dependencies_mock.side_effect = stop
-        data = '''/* BEGIN_DEPENDENCIES
- * depends_on:MBEDTLS_ECP_C
- * END_DEPENDENCIES
- */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(Exception, parse_functions, stream)
-        parse_suite_dependencies_mock.assert_called_with(stream)
-        self.assertEqual(stream.line_no, 1)
-
-    @patch("generate_test_code.parse_function_dependencies")
-    def test_begin_function_dep(self, func_mock):
-        """
-        Test that BEGIN_CASE dependencies are checked and
-        parse_function_dependencies() is called.
-        :return:
-        """
-        def stop(*_unused):
-            """Stop when parse_until_pattern is called."""
-            raise Exception
-        func_mock.side_effect = stop
-
-        dependencies_str = '/* BEGIN_CASE ' \
-            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
-        data = '''%svoid test_func()
-{
-}
-''' % dependencies_str
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(Exception, parse_functions, stream)
-        func_mock.assert_called_with(dependencies_str)
-        self.assertEqual(stream.line_no, 1)
-
-    @patch("generate_test_code.parse_function_code")
-    @patch("generate_test_code.parse_function_dependencies")
-    def test_return(self, func_mock1, func_mock2):
-        """
-        Test that begin case is checked and parse_function_code() is called.
-        :return:
-        """
-        func_mock1.return_value = []
-        in_func_code = '''void test_func()
-{
-}
-'''
-        func_dispatch = '''
-    test_func_wrapper,
-'''
-        func_mock2.return_value = 'test_func', [],\
-            in_func_code, func_dispatch
-        dependencies_str = '/* BEGIN_CASE ' \
-            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
-        data = '''%svoid test_func()
-{
-}
-''' % dependencies_str
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        suite_dependencies, dispatch_code, func_code, func_info = \
-            parse_functions(stream)
-        func_mock1.assert_called_with(dependencies_str)
-        func_mock2.assert_called_with(stream, [], [])
-        self.assertEqual(stream.line_no, 5)
-        self.assertEqual(suite_dependencies, [])
-        expected_dispatch_code = '''/* Function Id: 0 */
-
-    test_func_wrapper,
-'''
-        self.assertEqual(dispatch_code, expected_dispatch_code)
-        self.assertEqual(func_code, in_func_code)
-        self.assertEqual(func_info, {'test_func': (0, [])})
-
-    def test_parsing(self):
-        """
-        Test case parsing.
-        :return:
-        """
-        data = '''/* BEGIN_HEADER */
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-/* END_HEADER */
-
-/* BEGIN_DEPENDENCIES
- * depends_on:MBEDTLS_ECP_C
- * END_DEPENDENCIES
- */
-
-/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
-void func1()
-{
-}
-/* END_CASE */
-
-/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
-void func2()
-{
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        suite_dependencies, dispatch_code, func_code, func_info = \
-            parse_functions(stream)
-        self.assertEqual(stream.line_no, 23)
-        self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])
-
-        expected_dispatch_code = '''/* Function Id: 0 */
-
-#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
-    test_func1_wrapper,
-#else
-    NULL,
-#endif
-/* Function Id: 1 */
-
-#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
-    test_func2_wrapper,
-#else
-    NULL,
-#endif
-'''
-        self.assertEqual(dispatch_code, expected_dispatch_code)
-        expected_func_code = '''#if defined(MBEDTLS_ECP_C)
-#line 2 "test_suite_ut.function"
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-#if defined(MBEDTLS_ENTROPY_NV_SEED)
-#if defined(MBEDTLS_FS_IO)
-#line 13 "test_suite_ut.function"
-void test_func1(void)
-{
-exit:
-    ;
-}
-
-void test_func1_wrapper( void ** params )
-{
-    (void)params;
-
-    test_func1(  );
-}
-#endif /* MBEDTLS_FS_IO */
-#endif /* MBEDTLS_ENTROPY_NV_SEED */
-#if defined(MBEDTLS_ENTROPY_NV_SEED)
-#if defined(MBEDTLS_FS_IO)
-#line 19 "test_suite_ut.function"
-void test_func2(void)
-{
-exit:
-    ;
-}
-
-void test_func2_wrapper( void ** params )
-{
-    (void)params;
-
-    test_func2(  );
-}
-#endif /* MBEDTLS_FS_IO */
-#endif /* MBEDTLS_ENTROPY_NV_SEED */
-#endif /* MBEDTLS_ECP_C */
-'''
-        self.assertEqual(func_code, expected_func_code)
-        self.assertEqual(func_info, {'test_func1': (0, []),
-                                     'test_func2': (1, [])})
-
-    def test_same_function_name(self):
-        """
-        Test name conflict.
-        :return:
-        """
-        data = '''/* BEGIN_HEADER */
-#include "mbedtls/ecp.h"
-
-#define ECP_PF_UNKNOWN     -1
-/* END_HEADER */
-
-/* BEGIN_DEPENDENCIES
- * depends_on:MBEDTLS_ECP_C
- * END_DEPENDENCIES
- */
-
-/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
-void func()
-{
-}
-/* END_CASE */
-
-/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
-void func()
-{
-}
-/* END_CASE */
-'''
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        self.assertRaises(GeneratorInputError, parse_functions, stream)
-
-
-class EscapedSplit(TestCase):
-    """
-    Test suite for testing escaped_split().
-    Note: since escaped_split() output is written back to the
-    intermediate data file, any escape characters in the input are
-    retained in the output.
-    """
-
-    def test_invalid_input(self):
-        """
-        Test when input split character is not a character.
-        :return:
-        """
-        self.assertRaises(ValueError, escaped_split, '', 'string')
-
-    def test_empty_string(self):
-        """
-        Test empty string input.
-        :return:
-        """
-        splits = escaped_split('', ':')
-        self.assertEqual(splits, [])
-
-    def test_no_escape(self):
-        """
-        Test with no escape character. The behaviour should be same as
-        str.split()
-        :return:
-        """
-        test_str = 'yahoo:google'
-        splits = escaped_split(test_str, ':')
-        self.assertEqual(splits, test_str.split(':'))
-
-    def test_escaped_input(self):
-        """
-        Test input that has escaped delimiter.
-        :return:
-        """
-        test_str = r'yahoo\:google:facebook'
-        splits = escaped_split(test_str, ':')
-        self.assertEqual(splits, [r'yahoo\:google', 'facebook'])
-
-    def test_escaped_escape(self):
-        """
-        Test input where the escape character itself is escaped.
-        :return:
-        """
-        test_str = r'yahoo\\:google:facebook'
-        splits = escaped_split(test_str, ':')
-        self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])
-
-    def test_all_at_once(self):
-        """
-        Test input mixing escaped delimiters and escaped escape characters.
-        :return:
-        """
-        test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia'
-        splits = escaped_split(test_str, ':')
-        self.assertEqual(splits, [r'yahoo\\', r'google',
-                                  r'facebook\:instagram\\',
-                                  r'bbc\\', r'wikipedia'])
-
-
-class ParseTestData(TestCase):
-    """
-    Test suite for parse_test_data().
-    """
-
-    def test_parser(self):
-        """
-        Test that tests are parsed correctly from data file.
-        :return:
-        """
-        data = """
-Diffie-Hellman full exchange #1
-dhm_do_dhm:10:"23":10:"5"
-
-Diffie-Hellman full exchange #2
-dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
-
-Diffie-Hellman full exchange #3
-dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271"
-
-Diffie-Hellman selftest
-dhm_selftest:
-"""
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        # List of (line_no, name, function_name, dependencies, args)
-        tests = list(parse_test_data(stream))
-        test1, test2, test3, test4 = tests
-        self.assertEqual(test1[0], 3)
-        self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
-        self.assertEqual(test1[2], 'dhm_do_dhm')
-        self.assertEqual(test1[3], [])
-        self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
-
-        self.assertEqual(test2[0], 6)
-        self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
-        self.assertEqual(test2[2], 'dhm_do_dhm')
-        self.assertEqual(test2[3], [])
-        self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
-                                    '10', '"9345098304850938450983409622"'])
-
-        self.assertEqual(test3[0], 9)
-        self.assertEqual(test3[1], 'Diffie-Hellman full exchange #3')
-        self.assertEqual(test3[2], 'dhm_do_dhm')
-        self.assertEqual(test3[3], [])
-        self.assertEqual(test3[4], ['10',
-                                    '"9345098382739712938719287391879381271"',
-                                    '10',
-                                    '"9345098792137312973297123912791271"'])
-
-        self.assertEqual(test4[0], 12)
-        self.assertEqual(test4[1], 'Diffie-Hellman selftest')
-        self.assertEqual(test4[2], 'dhm_selftest')
-        self.assertEqual(test4[3], [])
-        self.assertEqual(test4[4], [])
-
-    def test_with_dependencies(self):
-        """
-        Test that tests with dependencies are parsed.
-        :return:
-        """
-        data = """
-Diffie-Hellman full exchange #1
-depends_on:YAHOO
-dhm_do_dhm:10:"23":10:"5"
-
-Diffie-Hellman full exchange #2
-dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
-
-"""
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        # List of (line_no, name, function_name, dependencies, args)
-        tests = list(parse_test_data(stream))
-        test1, test2 = tests
-        self.assertEqual(test1[0], 4)
-        self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
-        self.assertEqual(test1[2], 'dhm_do_dhm')
-        self.assertEqual(test1[3], ['YAHOO'])
-        self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
-
-        self.assertEqual(test2[0], 7)
-        self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
-        self.assertEqual(test2[2], 'dhm_do_dhm')
-        self.assertEqual(test2[3], [])
-        self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
-                                    '10', '"9345098304850938450983409622"'])
-
-    def test_no_args(self):
-        """
-        Test GeneratorInputError is raised when test function name and
-        args line is missing.
-        :return:
-        """
-        data = """
-Diffie-Hellman full exchange #1
-depends_on:YAHOO
-
-
-Diffie-Hellman full exchange #2
-dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
-
-"""
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        err = None
-        try:
-            for _, _, _, _, _ in parse_test_data(stream):
-                pass
-        except GeneratorInputError as err:
-            self.assertEqual(type(err), GeneratorInputError)
-
-    def test_incomplete_data(self):
-        """
-        Test GeneratorInputError is raised when test function name
-        and args line is missing.
-        :return:
-        """
-        data = """
-Diffie-Hellman full exchange #1
-depends_on:YAHOO
-"""
-        stream = StringIOWrapper('test_suite_ut.function', data)
-        err = None
-        try:
-            for _, _, _, _, _ in parse_test_data(stream):
-                pass
-        except GeneratorInputError as err:
-            self.assertEqual(type(err), GeneratorInputError)
-
-
-class GenDepCheck(TestCase):
-    """
-    Test suite for gen_dep_check(). It is assumed this function is
-    called with valid inputs.
-    """
-
-    def test_gen_dep_check(self):
-        """
-        Test that dependency check code is generated correctly.
-        :return:
-        """
-        expected = """
-        case 5:
-            {
-#if defined(YAHOO)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;"""
-        out = gen_dep_check(5, 'YAHOO')
-        self.assertEqual(out, expected)
-
-    def test_not_defined_dependency(self):
-        """
-        Test dependency with !.
-        :return:
-        """
-        expected = """
-        case 5:
-            {
-#if !defined(YAHOO)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;"""
-        out = gen_dep_check(5, '!YAHOO')
-        self.assertEqual(out, expected)
-
-    def test_empty_dependency(self):
-        """
-        Test invalid dependency input.
-        :return:
-        """
-        self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!')
-
-    def test_negative_dep_id(self):
-        """
-        Test invalid dependency input.
-        :return:
-        """
-        self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO')
-
-
-class GenExpCheck(TestCase):
-    """
-    Test suite for gen_expression_check(). It is assumed this function
-    is called with valid inputs.
-    """
-
-    def test_gen_exp_check(self):
-        """
-        Test that expression check code is generated correctly.
-        :return:
-        """
-        expected = """
-        case 5:
-            {
-                *out_value = YAHOO;
-            }
-            break;"""
-        out = gen_expression_check(5, 'YAHOO')
-        self.assertEqual(out, expected)
-
-    def test_invalid_expression(self):
-        """
-        Test invalid expression input.
-        :return:
-        """
-        self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')
-
-    def test_negative_exp_id(self):
-        """
-        Test invalid expression id.
-        :return:
-        """
-        self.assertRaises(GeneratorInputError, gen_expression_check,
-                          -1, 'YAHOO')
-
-
-class WriteDependencies(TestCase):
-    """
-    Test suite for testing write_dependencies.
-    """
-
-    def test_no_test_dependencies(self):
-        """
-        Test when test dependencies input is empty.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_dependencies = []
-        dep_check_code = write_dependencies(stream, [], unique_dependencies)
-        self.assertEqual(dep_check_code, '')
-        self.assertEqual(len(unique_dependencies), 0)
-        self.assertEqual(stream.getvalue(), '')
-
-    def test_unique_dep_ids(self):
-        """
-        Test that unique dependencies are assigned sequential IDs.
-
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_dependencies = []
-        dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
-                                            unique_dependencies)
-        expect_dep_check_code = '''
-        case 0:
-            {
-#if defined(DEP3)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;
-        case 1:
-            {
-#if defined(DEP2)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;
-        case 2:
-            {
-#if defined(DEP1)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;'''
-        self.assertEqual(dep_check_code, expect_dep_check_code)
-        self.assertEqual(len(unique_dependencies), 3)
-        self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')
-
-    def test_dep_id_repeat(self):
-        """
-        Test that repeated dependencies reuse previously assigned IDs.
-
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_dependencies = []
-        dep_check_code = ''
-        dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
-                                             unique_dependencies)
-        dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
-                                             unique_dependencies)
-        dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
-                                             unique_dependencies)
-        expect_dep_check_code = '''
-        case 0:
-            {
-#if defined(DEP3)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;
-        case 1:
-            {
-#if defined(DEP2)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;
-        case 2:
-            {
-#if defined(DEP1)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;'''
-        self.assertEqual(dep_check_code, expect_dep_check_code)
-        self.assertEqual(len(unique_dependencies), 3)
-        self.assertEqual(stream.getvalue(),
-                         'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')
-
-
-class WriteParams(TestCase):
-    """
-    Test Suite for testing write_parameters().
-    """
-
-    def test_no_params(self):
-        """
-        Test with empty test_args.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_expressions = []
-        expression_code = write_parameters(stream, [], [], unique_expressions)
-        self.assertEqual(len(unique_expressions), 0)
-        self.assertEqual(expression_code, '')
-        self.assertEqual(stream.getvalue(), '\n')
-
-    def test_no_exp_param(self):
-        """
-        Test when there is no macro or expression in the params.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_expressions = []
-        expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
-                                                    '0'],
-                                           ['char*', 'hex', 'int'],
-                                           unique_expressions)
-        self.assertEqual(len(unique_expressions), 0)
-        self.assertEqual(expression_code, '')
-        self.assertEqual(stream.getvalue(),
-                         ':char*:"Yahoo":hex:"abcdef00":int:0\n')
-
-    def test_hex_format_int_param(self):
-        """
-        Test int parameter in hex format.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_expressions = []
-        expression_code = write_parameters(stream,
-                                           ['"Yahoo"', '"abcdef00"', '0xAA'],
-                                           ['char*', 'hex', 'int'],
-                                           unique_expressions)
-        self.assertEqual(len(unique_expressions), 0)
-        self.assertEqual(expression_code, '')
-        self.assertEqual(stream.getvalue(),
-                         ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')
-
-    def test_with_exp_param(self):
-        """
-        Test when there is a macro or expression in the params.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_expressions = []
-        expression_code = write_parameters(stream,
-                                           ['"Yahoo"', '"abcdef00"', '0',
-                                            'MACRO1', 'MACRO2', 'MACRO3'],
-                                           ['char*', 'hex', 'int',
-                                            'int', 'int', 'int'],
-                                           unique_expressions)
-        self.assertEqual(len(unique_expressions), 3)
-        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
-        expected_expression_code = '''
-        case 0:
-            {
-                *out_value = MACRO1;
-            }
-            break;
-        case 1:
-            {
-                *out_value = MACRO2;
-            }
-            break;
-        case 2:
-            {
-                *out_value = MACRO3;
-            }
-            break;'''
-        self.assertEqual(expression_code, expected_expression_code)
-        self.assertEqual(stream.getvalue(),
-                         ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
-                         ':exp:2\n')
-
-    def test_with_repeat_calls(self):
-        """
-        Test when write_parameters() is called repeatedly with the
-        same macro or expression.
-        :return:
-        """
-        stream = StringIOWrapper('test_suite_ut.data', '')
-        unique_expressions = []
-        expression_code = ''
-        expression_code += write_parameters(stream,
-                                            ['"Yahoo"', 'MACRO1', 'MACRO2'],
-                                            ['char*', 'int', 'int'],
-                                            unique_expressions)
-        expression_code += write_parameters(stream,
-                                            ['"abcdef00"', 'MACRO2', 'MACRO3'],
-                                            ['hex', 'int', 'int'],
-                                            unique_expressions)
-        expression_code += write_parameters(stream,
-                                            ['0', 'MACRO3', 'MACRO1'],
-                                            ['int', 'int', 'int'],
-                                            unique_expressions)
-        self.assertEqual(len(unique_expressions), 3)
-        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
-        expected_expression_code = '''
-        case 0:
-            {
-                *out_value = MACRO1;
-            }
-            break;
-        case 1:
-            {
-                *out_value = MACRO2;
-            }
-            break;
-        case 2:
-            {
-                *out_value = MACRO3;
-            }
-            break;'''
-        self.assertEqual(expression_code, expected_expression_code)
-        expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
-:hex:"abcdef00":exp:1:exp:2
-:int:0:exp:2:exp:0
-'''
-        self.assertEqual(stream.getvalue(), expected_data_file)
-
-
-class GenTestSuiteDependenciesChecks(TestCase):
-    """
-    Test suite for testing gen_suite_dep_checks()
-    """
-    def test_empty_suite_dependencies(self):
-        """
-        Test with empty suite_dependencies list.
-
-        :return:
-        """
-        dep_check_code, expression_code = \
-            gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
-        self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
-        self.assertEqual(expression_code, 'EXPRESSION_CODE')
-
-    def test_suite_dependencies(self):
-        """
-        Test with suite_dependencies list.
-
-        :return:
-        """
-        dep_check_code, expression_code = \
-            gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
-                                 'EXPRESSION_CODE')
-        expected_dep_check_code = '''
-#if defined(SUITE_DEP)
-DEP_CHECK_CODE
-#endif
-'''
-        expected_expression_code = '''
-#if defined(SUITE_DEP)
-EXPRESSION_CODE
-#endif
-'''
-        self.assertEqual(dep_check_code, expected_dep_check_code)
-        self.assertEqual(expression_code, expected_expression_code)
-
-    def test_no_dep_no_exp(self):
-        """
-        Test when there is no dependency or expression code.
-        :return:
-        """
-        dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
-        self.assertEqual(dep_check_code, '')
-        self.assertEqual(expression_code, '')
-
-
-class GenFromTestData(TestCase):
-    """
-    Test suite for gen_from_test_data()
-    """
-
-    @staticmethod
-    @patch("generate_test_code.write_dependencies")
-    @patch("generate_test_code.write_parameters")
-    @patch("generate_test_code.gen_suite_dep_checks")
-    def test_intermediate_data_file(func_mock1,
-                                    write_parameters_mock,
-                                    write_dependencies_mock):
-        """
-        Test that the intermediate data file is written with the
-        expected data.
-        :return:
-        """
-        data = '''
-My test
-depends_on:DEP1
-func1:0
-'''
-        data_f = StringIOWrapper('test_suite_ut.data', data)
-        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
-        func_info = {'test_func1': (1, ('int',))}
-        suite_dependencies = []
-        write_parameters_mock.side_effect = write_parameters
-        write_dependencies_mock.side_effect = write_dependencies
-        func_mock1.side_effect = gen_suite_dep_checks
-        gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
-        write_dependencies_mock.assert_called_with(out_data_f,
-                                                   ['DEP1'], ['DEP1'])
-        write_parameters_mock.assert_called_with(out_data_f, ['0'],
-                                                 ('int',), [])
-        expected_dep_check_code = '''
-        case 0:
-            {
-#if defined(DEP1)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;'''
-        func_mock1.assert_called_with(
-            suite_dependencies, expected_dep_check_code, '')
-
-    def test_function_not_found(self):
-        """
-        Test that GeneratorInputError is raised when the function info
-        is not found.
-        :return:
-        """
-        data = '''
-My test
-depends_on:DEP1
-func1:0
-'''
-        data_f = StringIOWrapper('test_suite_ut.data', data)
-        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
-        func_info = {'test_func2': (1, ('int',))}
-        suite_dependencies = []
-        self.assertRaises(GeneratorInputError, gen_from_test_data,
-                          data_f, out_data_f, func_info, suite_dependencies)
-
-    def test_different_func_args(self):
-        """
-        Test that GeneratorInputError is raised when the number of test
-        arguments and function parameters differ.
-        :return:
-        """
-        data = '''
-My test
-depends_on:DEP1
-func1:0
-'''
-        data_f = StringIOWrapper('test_suite_ut.data', data)
-        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
-        func_info = {'test_func2': (1, ('int', 'hex'))}
-        suite_dependencies = []
-        self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
-                          out_data_f, func_info, suite_dependencies)
-
-    def test_output(self):
-        """
-        Test the generated dependency check code, expression code and
-        intermediate data file contents.
-        :return:
-        """
-        data = '''
-My test 1
-depends_on:DEP1
-func1:0:0xfa:MACRO1:MACRO2
-
-My test 2
-depends_on:DEP1:DEP2
-func2:"yahoo":88:MACRO1
-'''
-        data_f = StringIOWrapper('test_suite_ut.data', data)
-        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
-        func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
-                     'test_func2': (1, ('char*', 'int', 'int'))}
-        suite_dependencies = []
-        dep_check_code, expression_code = \
-            gen_from_test_data(data_f, out_data_f, func_info,
-                               suite_dependencies)
-        expected_dep_check_code = '''
-        case 0:
-            {
-#if defined(DEP1)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;
-        case 1:
-            {
-#if defined(DEP2)
-                ret = DEPENDENCY_SUPPORTED;
-#else
-                ret = DEPENDENCY_NOT_SUPPORTED;
-#endif
-            }
-            break;'''
-        expected_data = '''My test 1
-depends_on:0
-0:int:0:int:0xfa:exp:0:exp:1
-
-My test 2
-depends_on:0:1
-1:char*:"yahoo":int:88:exp:0
-
-'''
-        expected_expression_code = '''
-        case 0:
-            {
-                *out_value = MACRO1;
-            }
-            break;
-        case 1:
-            {
-                *out_value = MACRO2;
-            }
-            break;'''
-        self.assertEqual(dep_check_code, expected_dep_check_code)
-        self.assertEqual(out_data_f.getvalue(), expected_data)
-        self.assertEqual(expression_code, expected_expression_code)
-
-
-if __name__ == '__main__':
-    unittest_main()
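The EscapedSplit suite above pins down the contract of escaped_split(): the input is split on a single-character delimiter, a backslash prevents the following delimiter from splitting, and escape characters are retained in the output because the result is written back to the intermediate data file. Below is a minimal standalone sketch of that behaviour for illustration only; the helper name escaped_split_sketch is hypothetical and this is not the implementation in generate_test_code.py.

# Minimal sketch of the escaped_split() contract documented by the
# EscapedSplit cases above. Hypothetical helper, not the real implementation.
def escaped_split_sketch(text, delimiter):
    """Split text on delimiter unless the delimiter is escaped.

    Escape characters are retained in the output fields.
    """
    if len(delimiter) > 1:
        # Mirrors test_invalid_input: only a single split character is valid.
        raise ValueError('Expected split character, not string.')
    fields = []
    current = ''
    escaping = False
    for char in text:
        if escaping:
            current += char           # keep the escaped character as-is
            escaping = False
        elif char == '\\':
            current += char           # escapes are retained in the output
            escaping = True
        elif char == delimiter:
            fields.append(current)    # unescaped delimiter starts a new field
            current = ''
        else:
            current += char
    if current:
        fields.append(current)
    return fields

# Matches the expectations of test_escaped_input and test_escaped_escape.
assert escaped_split_sketch(r'yahoo\:google:facebook', ':') == \
    [r'yahoo\:google', 'facebook']
assert escaped_split_sketch(r'yahoo\\:google:facebook', ':') == \
    [r'yahoo\\', 'google', 'facebook']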
diff --git a/tests/suites/test_suite_pk.function b/tests/suites/test_suite_pk.function
index 1188137..23f5cda 100644
--- a/tests/suites/test_suite_pk.function
+++ b/tests/suites/test_suite_pk.function
@@ -243,7 +243,7 @@
 /** Setup the provided PK context.
  *
  * Predefined keys used for the setup are taken from "test/src/test_keys.h"
- * which is automatically generated using "tests/scripts/generate_test_keys.py".
+ * which is automatically generated using "framework/scripts/generate_test_keys.py".
  *
 * \param pk               The PK object to fill. It must have been initialized
 *                         (mbedtls_pk_init()), but not set up (mbedtls_pk_setup()).