Squashed 'lib/mbedtls/external/mbedtls/' content from commit 2ca6c285a0dd
git-subtree-dir: lib/mbedtls/external/mbedtls
git-subtree-split: 2ca6c285a0dd3f33982dd57299012dacab1ff206
diff --git a/tests/scripts/all-in-docker.sh b/tests/scripts/all-in-docker.sh
new file mode 100755
index 0000000..b2a31c2
--- /dev/null
+++ b/tests/scripts/all-in-docker.sh
@@ -0,0 +1,27 @@
+#!/bin/bash -eu
+
+# all-in-docker.sh
+#
+# Purpose
+# -------
+# This runs all.sh (except for armcc) in a Docker container.
+#
+# WARNING: the Dockerfile used by this script is no longer maintained! See
+# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
+# for the set of Docker images we use on the CI.
+#
+# Notes for users
+# ---------------
+# See docker_env.sh for prerequisites and other information.
+#
+# See also all.sh for notes about invocation of that script.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+source tests/scripts/docker_env.sh
+
+# Run tests that are possible with openly available compilers
+run_in_docker tests/scripts/all.sh \
+ --no-armcc \
+ $@
diff --git a/tests/scripts/all.sh b/tests/scripts/all.sh
new file mode 100755
index 0000000..a1203f7
--- /dev/null
+++ b/tests/scripts/all.sh
@@ -0,0 +1,6531 @@
+#! /usr/bin/env bash
+
+# all.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+
+
+################################################################
+#### Documentation
+################################################################
+
+# Purpose
+# -------
+#
+# To run all tests possible or available on the platform.
+#
+# Notes for users
+# ---------------
+#
+# Warning: the test is destructive. It includes various build modes and
+# configurations, and can and will arbitrarily change the current CMake
+# configuration. The following files must be committed into git:
+# * include/mbedtls/mbedtls_config.h
+# * Makefile, library/Makefile, programs/Makefile, tests/Makefile,
+# programs/fuzz/Makefile
+# After running this script, the CMake cache will be lost and CMake
+# will no longer be initialised.
+#
+# The script assumes the presence of a number of tools:
+# * Basic Unix tools (Windows users note: a Unix-style find must be before
+# the Windows find in the PATH)
+# * Perl
+# * GNU Make
+# * CMake
+# * GCC and Clang (recent enough for using ASan with gcc and MemSan with clang, or valgrind)
+# * G++
+# * arm-gcc and mingw-gcc
+# * ArmCC 5 and ArmCC 6, unless invoked with --no-armcc
+# * OpenSSL and GnuTLS command line tools, in suitable versions for the
+# interoperability tests. The following are the official versions at the
+# time of writing:
+# * GNUTLS_{CLI,SERV} = 3.4.10
+# * GNUTLS_NEXT_{CLI,SERV} = 3.7.2
+# * OPENSSL = 1.0.2g (without Debian/Ubuntu patches)
+# * OPENSSL_NEXT = 1.1.1a
+# See the invocation of check_tools below for details.
+#
+# This script must be invoked from the toplevel directory of a git
+# working copy of Mbed TLS.
+#
+# The behavior on an error depends on whether --keep-going (alias -k)
+# is in effect.
+# * Without --keep-going: the script stops on the first error without
+# cleaning up. This lets you work in the configuration of the failing
+# component.
+# * With --keep-going: the script runs all requested components and
+# reports failures at the end. In particular the script always cleans
+# up on exit.
+#
+# Note that the output is not saved. You may want to run
+# script -c tests/scripts/all.sh
+# or
+# tests/scripts/all.sh >all.log 2>&1
+#
+# Notes for maintainers
+# ---------------------
+#
+# The bulk of the code is organized into functions that follow one of the
+# following naming conventions:
+# * pre_XXX: things to do before running the tests, in order.
+# * component_XXX: independent components. They can be run in any order.
+# * component_check_XXX: quick tests that aren't worth parallelizing.
+# * component_build_XXX: build things but don't run them.
+# * component_test_XXX: build and test.
+# * component_release_XXX: tests that the CI should skip during PR testing.
+# * support_XXX: if support_XXX exists and returns false then
+# component_XXX is not run by default.
+# * post_XXX: things to do after running the tests.
+# * other: miscellaneous support functions.
+#
+# Each component must start by invoking `msg` with a short informative message.
+#
+# Warning: due to the way bash detects errors, the failure of a command
+# inside 'if' or '!' is not detected. Use the 'not' function instead of '!'.
+#
+# Each component is executed in a separate shell process. The component
+# fails if any command in it returns a non-zero status.
+#
+# The framework performs some cleanup tasks after each component. This
+# means that components can assume that the working directory is in a
+# cleaned-up state, and don't need to perform the cleanup themselves.
+# * Run `make clean`.
+# * Restore `include/mbedtls/mbedtls_config.h` from a backup made before running
+# the component.
+# * Check out `Makefile`, `library/Makefile`, `programs/Makefile`,
+# `tests/Makefile` and `programs/fuzz/Makefile` from git.
+# This cleans up after an in-tree use of CMake.
+#
+# The tests are roughly in order from fastest to slowest. This doesn't
+# have to be exact, but in general you should add slower tests towards
+# the end and fast checks near the beginning.
+
+
+
+################################################################
+#### Initialization and command line parsing
+################################################################
+
+# Abort on errors (even on the left-hand side of a pipe).
+# Treat uninitialised variables as errors.
+set -e -o pipefail -u
+
+# Enable ksh/bash extended file matching patterns
+shopt -s extglob
+
+in_mbedtls_repo () {
+ test -d include -a -d library -a -d programs -a -d tests
+}
+
+in_tf_psa_crypto_repo () {
+ test -d include -a -d core -a -d drivers -a -d programs -a -d tests
+}
+
+pre_check_environment () {
+ if in_mbedtls_repo || in_tf_psa_crypto_repo; then :; else
+ echo "Must be run from Mbed TLS / TF-PSA-Crypto root" >&2
+ exit 1
+ fi
+}
+
+pre_initialize_variables () {
+ if in_mbedtls_repo; then
+ CONFIG_H='include/mbedtls/mbedtls_config.h'
+ else
+ CONFIG_H='drivers/builtin/include/mbedtls/mbedtls_config.h'
+ fi
+ CRYPTO_CONFIG_H='include/psa/crypto_config.h'
+ CONFIG_TEST_DRIVER_H='tests/include/test/drivers/config_test_driver.h'
+
+ # Files that are clobbered by some jobs will be backed up. Use a different
+ # suffix from auxiliary scripts so that all.sh and auxiliary scripts can
+ # independently decide when to remove the backup file.
+ backup_suffix='.all.bak'
+ # Files clobbered by config.py
+ files_to_back_up="$CONFIG_H $CRYPTO_CONFIG_H $CONFIG_TEST_DRIVER_H"
+ if in_mbedtls_repo; then
+ # Files clobbered by in-tree cmake
+ files_to_back_up="$files_to_back_up Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile"
+ fi
+
+ append_outcome=0
+ MEMORY=0
+ FORCE=0
+ QUIET=0
+ KEEP_GOING=0
+
+ # Seed value used with the --release-test option.
+ #
+ # See also RELEASE_SEED in basic-build-test.sh. Debugging is easier if
+ # both values are kept in sync. If you change the value here because it
+ # breaks some tests, you'll definitely want to change it in
+ # basic-build-test.sh as well.
+ RELEASE_SEED=1
+
+ # Specify character collation for regular expressions and sorting with C locale
+ export LC_COLLATE=C
+
+ : ${MBEDTLS_TEST_OUTCOME_FILE=}
+ : ${MBEDTLS_TEST_PLATFORM="$(uname -s | tr -c \\n0-9A-Za-z _)-$(uname -m | tr -c \\n0-9A-Za-z _)"}
+ export MBEDTLS_TEST_OUTCOME_FILE
+ export MBEDTLS_TEST_PLATFORM
+
+ # Default commands, can be overridden by the environment
+ : ${OPENSSL:="openssl"}
+ : ${OPENSSL_NEXT:="$OPENSSL"}
+ : ${GNUTLS_CLI:="gnutls-cli"}
+ : ${GNUTLS_SERV:="gnutls-serv"}
+ : ${OUT_OF_SOURCE_DIR:=./mbedtls_out_of_source_build}
+ : ${ARMC5_BIN_DIR:=/usr/bin}
+ : ${ARMC6_BIN_DIR:=/usr/bin}
+ : ${ARM_NONE_EABI_GCC_PREFIX:=arm-none-eabi-}
+ : ${ARM_LINUX_GNUEABI_GCC_PREFIX:=arm-linux-gnueabi-}
+ : ${CLANG_LATEST:="clang-latest"}
+ : ${CLANG_EARLIEST:="clang-earliest"}
+ : ${GCC_LATEST:="gcc-latest"}
+ : ${GCC_EARLIEST:="gcc-earliest"}
+ # if MAKEFLAGS is not set add the -j option to speed up invocations of make
+ if [ -z "${MAKEFLAGS+set}" ]; then
+ export MAKEFLAGS="-j$(all_sh_nproc)"
+ fi
+ # if CC is not set, use clang by default (if present) to improve build times
+ if [ -z "${CC+set}" ] && (type clang > /dev/null 2>&1); then
+ export CC="clang"
+ fi
+
+ # Include more verbose output for failing tests run by CMake or make
+ export CTEST_OUTPUT_ON_FAILURE=1
+
+ # CFLAGS and LDFLAGS for Asan builds that don't use CMake
+ # default to -O2, use -Ox _after_ this if you want another level
+ ASAN_CFLAGS='-O2 -Werror -fsanitize=address,undefined -fno-sanitize-recover=all'
+ # Normally, tests should use this compiler for ASAN testing
+ ASAN_CC=clang
+
+ # Platform tests have an allocation that returns null
+ export ASAN_OPTIONS="allocator_may_return_null=1"
+ export MSAN_OPTIONS="allocator_may_return_null=1"
+
+ # Gather the list of available components. These are the functions
+ # defined in this script whose name starts with "component_".
+ ALL_COMPONENTS=$(compgen -A function component_ | sed 's/component_//')
+
+ # Delay determining SUPPORTED_COMPONENTS until the command line options have a chance to override
+ # the commands set by the environment
+}
+
+setup_quiet_wrappers()
+{
+ # Pick up "quiet" wrappers for make and cmake, which don't output very much
+ # unless there is an error. This reduces logging overhead in the CI.
+ #
+ # Note that the cmake wrapper breaks unless we use an absolute path here.
+ if [[ -e ${PWD}/tests/scripts/quiet ]]; then
+ export PATH=${PWD}/tests/scripts/quiet:$PATH
+ fi
+}
+
+# Test whether the component $1 is included in the command line patterns.
+is_component_included()
+{
+ # Temporarily disable wildcard expansion so that $COMMAND_LINE_COMPONENTS
+ # only does word splitting.
+ set -f
+ for pattern in $COMMAND_LINE_COMPONENTS; do
+ set +f
+ case ${1#component_} in $pattern) return 0;; esac
+ done
+ set +f
+ return 1
+}
+
+usage()
+{
+ cat <<EOF
+Usage: $0 [OPTION]... [COMPONENT]...
+Run mbedtls release validation tests.
+By default, run all tests. With one or more COMPONENT, run only those.
+COMPONENT can be the name of a component or a shell wildcard pattern.
+
+Examples:
+ $0 "check_*"
+ Run all sanity checks.
+ $0 --no-armcc --except test_memsan
+ Run everything except builds that require armcc and MemSan.
+
+Special options:
+ -h|--help Print this help and exit.
+ --list-all-components List all available test components and exit.
+ --list-components List components supported on this platform and exit.
+
+General options:
+ -q|--quiet Only output component names, and errors if any.
+ -f|--force Force the tests to overwrite any modified files.
+ -k|--keep-going Run all tests and report errors at the end.
+ -m|--memory Additional optional memory tests.
+ --append-outcome Append to the outcome file (if used).
+ --arm-none-eabi-gcc-prefix=<string>
+ Prefix for a cross-compiler for arm-none-eabi
+ (default: "${ARM_NONE_EABI_GCC_PREFIX}")
+ --arm-linux-gnueabi-gcc-prefix=<string>
+ Prefix for a cross-compiler for arm-linux-gnueabi
+ (default: "${ARM_LINUX_GNUEABI_GCC_PREFIX}")
+ --armcc Run ARM Compiler builds (on by default).
+ --restore First clean up the build tree, restoring backed up
+ files. Do not run any components unless they are
+ explicitly specified.
+ --error-test Error test mode: run a failing function in addition
+ to any specified component. May be repeated.
+ --except Exclude the COMPONENTs listed on the command line,
+ instead of running only those.
+ --no-append-outcome Write a new outcome file and analyze it (default).
+ --no-armcc Skip ARM Compiler builds.
+ --no-force Refuse to overwrite modified files (default).
+ --no-keep-going Stop at the first error (default).
+ --no-memory No additional memory tests (default).
+ --no-quiet Print full output from components.
+ --out-of-source-dir=<path> Directory used for CMake out-of-source build tests.
+ --outcome-file=<path> File where test outcomes are written (not done if
+ empty; default: \$MBEDTLS_TEST_OUTCOME_FILE).
+ --random-seed Use a random seed value for randomized tests (default).
+ -r|--release-test Run this script in release mode. This fixes the seed value to ${RELEASE_SEED}.
+ -s|--seed Integer seed value to use for this test run.
+
+Tool path options:
+ --armc5-bin-dir=<ARMC5_bin_dir_path> ARM Compiler 5 bin directory.
+ --armc6-bin-dir=<ARMC6_bin_dir_path> ARM Compiler 6 bin directory.
+ --clang-earliest=<Clang_earliest_path> Earliest version of clang available
+ --clang-latest=<Clang_latest_path> Latest version of clang available
+ --gcc-earliest=<GCC_earliest_path> Earliest version of GCC available
+ --gcc-latest=<GCC_latest_path> Latest version of GCC available
+ --gnutls-cli=<GnuTLS_cli_path> GnuTLS client executable to use for most tests.
+ --gnutls-serv=<GnuTLS_serv_path> GnuTLS server executable to use for most tests.
+ --openssl=<OpenSSL_path> OpenSSL executable to use for most tests.
+ --openssl-next=<OpenSSL_path> OpenSSL executable to use for recent things like ARIA
+EOF
+}
+
+# Cleanup before/after running a component.
+# Remove built files as well as the cmake cache/config.
+# Does not remove generated source files.
+cleanup()
+{
+ if in_mbedtls_repo; then
+ command make clean
+ fi
+
+ # Remove CMake artefacts
+ find . -name .git -prune -o \
+ -iname CMakeFiles -exec rm -rf {} \+ -o \
+ \( -iname cmake_install.cmake -o \
+ -iname CTestTestfile.cmake -o \
+ -iname CMakeCache.txt -o \
+ -path './cmake/*.cmake' \) -exec rm -f {} \+
+ # Recover files overwritten by in-tree CMake builds
+ rm -f include/Makefile include/mbedtls/Makefile programs/!(fuzz)/Makefile
+
+ # Remove any artifacts from the component_test_cmake_as_subdirectory test.
+ rm -rf programs/test/cmake_subproject/build
+ rm -f programs/test/cmake_subproject/Makefile
+ rm -f programs/test/cmake_subproject/cmake_subproject
+
+ # Remove any artifacts from the component_test_cmake_as_package test.
+ rm -rf programs/test/cmake_package/build
+ rm -f programs/test/cmake_package/Makefile
+ rm -f programs/test/cmake_package/cmake_package
+
+ # Remove any artifacts from the component_test_cmake_as_installed_package test.
+ rm -rf programs/test/cmake_package_install/build
+ rm -f programs/test/cmake_package_install/Makefile
+ rm -f programs/test/cmake_package_install/cmake_package_install
+
+ # Restore files that may have been clobbered by the job
+ for x in $files_to_back_up; do
+ if [[ -e "$x$backup_suffix" ]]; then
+ cp -p "$x$backup_suffix" "$x"
+ fi
+ done
+}
+
+# Final cleanup when this script exits (except when exiting on a failure
+# in non-keep-going mode).
+final_cleanup () {
+ cleanup
+
+ for x in $files_to_back_up; do
+ rm -f "$x$backup_suffix"
+ done
+}
+
+# Executed on exit. May be redefined depending on command line options.
+final_report () {
+ :
+}
+
+fatal_signal () {
+ final_cleanup
+ final_report $1
+ trap - $1
+ kill -$1 $$
+}
+
+trap 'fatal_signal HUP' HUP
+trap 'fatal_signal INT' INT
+trap 'fatal_signal TERM' TERM
+
+# Number of processors on this machine. Used as the default setting
+# for parallel make.
+all_sh_nproc ()
+{
+ {
+ nproc || # Linux
+ sysctl -n hw.ncpuonline || # NetBSD, OpenBSD
+ sysctl -n hw.ncpu || # FreeBSD
+ echo 1
+ } 2>/dev/null
+}
+
+msg()
+{
+ if [ -n "${current_component:-}" ]; then
+ current_section="${current_component#component_}: $1"
+ else
+ current_section="$1"
+ fi
+
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
+ echo ""
+ echo "******************************************************************"
+ echo "* $current_section "
+ printf "* "; date
+ echo "******************************************************************"
+}
+
+armc6_build_test()
+{
+ FLAGS="$1"
+
+ msg "build: ARM Compiler 6 ($FLAGS)"
+ make clean
+ ARM_TOOL_VARIANT="ult" CC="$ARMC6_CC" AR="$ARMC6_AR" CFLAGS="$FLAGS" \
+ WARNING_CFLAGS='-Werror -xc -std=c99' make lib
+
+ msg "size: ARM Compiler 6 ($FLAGS)"
+ "$ARMC6_FROMELF" -z library/*.o
+}
+
+err_msg()
+{
+ echo "$1" >&2
+}
+
+check_tools()
+{
+ for tool in "$@"; do
+ if ! `type "$tool" >/dev/null 2>&1`; then
+ err_msg "$tool not found!"
+ exit 1
+ fi
+ done
+}
+
+pre_parse_command_line () {
+ COMMAND_LINE_COMPONENTS=
+ all_except=0
+ error_test=0
+ list_components=0
+ restore_first=0
+ no_armcc=
+
+ # Note that legacy options are ignored instead of being omitted from this
+ # list of options, so invocations that worked with previous version of
+ # all.sh will still run and work properly.
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ --append-outcome) append_outcome=1;;
+ --arm-none-eabi-gcc-prefix) shift; ARM_NONE_EABI_GCC_PREFIX="$1";;
+ --arm-linux-gnueabi-gcc-prefix) shift; ARM_LINUX_GNUEABI_GCC_PREFIX="$1";;
+ --armcc) no_armcc=;;
+ --armc5-bin-dir) shift; ARMC5_BIN_DIR="$1";;
+ --armc6-bin-dir) shift; ARMC6_BIN_DIR="$1";;
+ --clang-earliest) shift; CLANG_EARLIEST="$1";;
+ --clang-latest) shift; CLANG_LATEST="$1";;
+ --error-test) error_test=$((error_test + 1));;
+ --except) all_except=1;;
+ --force|-f) FORCE=1;;
+ --gcc-earliest) shift; GCC_EARLIEST="$1";;
+ --gcc-latest) shift; GCC_LATEST="$1";;
+ --gnutls-cli) shift; GNUTLS_CLI="$1";;
+ --gnutls-legacy-cli) shift;; # ignored for backward compatibility
+ --gnutls-legacy-serv) shift;; # ignored for backward compatibility
+ --gnutls-serv) shift; GNUTLS_SERV="$1";;
+ --help|-h) usage; exit;;
+ --keep-going|-k) KEEP_GOING=1;;
+ --list-all-components) printf '%s\n' $ALL_COMPONENTS; exit;;
+ --list-components) list_components=1;;
+ --memory|-m) MEMORY=1;;
+ --no-append-outcome) append_outcome=0;;
+ --no-armcc) no_armcc=1;;
+ --no-force) FORCE=0;;
+ --no-keep-going) KEEP_GOING=0;;
+ --no-memory) MEMORY=0;;
+ --no-quiet) QUIET=0;;
+ --openssl) shift; OPENSSL="$1";;
+ --openssl-next) shift; OPENSSL_NEXT="$1";;
+ --outcome-file) shift; MBEDTLS_TEST_OUTCOME_FILE="$1";;
+ --out-of-source-dir) shift; OUT_OF_SOURCE_DIR="$1";;
+ --quiet|-q) QUIET=1;;
+ --random-seed) unset SEED;;
+ --release-test|-r) SEED=$RELEASE_SEED;;
+ --restore) restore_first=1;;
+ --seed|-s) shift; SEED="$1";;
+ -*)
+ echo >&2 "Unknown option: $1"
+ echo >&2 "Run $0 --help for usage."
+ exit 120
+ ;;
+ *) COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS $1";;
+ esac
+ shift
+ done
+
+ # Exclude components that are not supported on this platform.
+ SUPPORTED_COMPONENTS=
+ for component in $ALL_COMPONENTS; do
+ case $(type "support_$component" 2>&1) in
+ *' function'*)
+ if ! support_$component; then continue; fi;;
+ esac
+ SUPPORTED_COMPONENTS="$SUPPORTED_COMPONENTS $component"
+ done
+
+ if [ $list_components -eq 1 ]; then
+ printf '%s\n' $SUPPORTED_COMPONENTS
+ exit
+ fi
+
+ # With no list of components, run everything.
+ if [ -z "$COMMAND_LINE_COMPONENTS" ] && [ $restore_first -eq 0 ]; then
+ all_except=1
+ fi
+
+ # --no-armcc is a legacy option. The modern way is --except '*_armcc*'.
+ # Ignore it if components are listed explicitly on the command line.
+ if [ -n "$no_armcc" ] && [ $all_except -eq 1 ]; then
+ COMMAND_LINE_COMPONENTS="$COMMAND_LINE_COMPONENTS *_armcc*"
+ fi
+
+ # Error out if an explicitly requested component doesn't exist.
+ if [ $all_except -eq 0 ]; then
+ unsupported=0
+ # Temporarily disable wildcard expansion so that $COMMAND_LINE_COMPONENTS
+ # only does word splitting.
+ set -f
+ for component in $COMMAND_LINE_COMPONENTS; do
+ set +f
+ # If the requested name includes a wildcard character, don't
+ # check it. Accept wildcard patterns that don't match anything.
+ case $component in
+ *[*?\[]*) continue;;
+ esac
+ case " $SUPPORTED_COMPONENTS " in
+ *" $component "*) :;;
+ *)
+ echo >&2 "Component $component was explicitly requested, but is not known or not supported."
+ unsupported=$((unsupported + 1));;
+ esac
+ done
+ set +f
+ if [ $unsupported -ne 0 ]; then
+ exit 2
+ fi
+ fi
+
+ # Build the list of components to run.
+ RUN_COMPONENTS=
+ for component in $SUPPORTED_COMPONENTS; do
+ if is_component_included "$component"; [ $? -eq $all_except ]; then
+ RUN_COMPONENTS="$RUN_COMPONENTS $component"
+ fi
+ done
+
+ unset all_except
+ unset no_armcc
+}
+
+pre_check_git () {
+ if [ $FORCE -eq 1 ]; then
+ rm -rf "$OUT_OF_SOURCE_DIR"
+ git checkout-index -f -q $CONFIG_H
+ cleanup
+ else
+
+ if [ -d "$OUT_OF_SOURCE_DIR" ]; then
+ echo "Warning - there is an existing directory at '$OUT_OF_SOURCE_DIR'" >&2
+ echo "You can either delete this directory manually, or force the test by rerunning"
+ echo "the script as: $0 --force --out-of-source-dir $OUT_OF_SOURCE_DIR"
+ exit 1
+ fi
+
+ if ! git diff --quiet "$CONFIG_H"; then
+ err_msg "Warning - the configuration file '$CONFIG_H' has been edited. "
+ echo "You can either delete or preserve your work, or force the test by rerunning the"
+ echo "script as: $0 --force"
+ exit 1
+ fi
+ fi
+}
+
+pre_restore_files () {
+ # If the makefiles have been generated by a framework such as cmake,
+ # restore them from git. If the makefiles look like modifications from
+ # the ones checked into git, take care not to modify them. Whatever
+ # this function leaves behind is what the script will restore before
+ # each component.
+ case "$(head -n1 Makefile)" in
+ *[Gg]enerated*)
+ git update-index --no-skip-worktree Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+ git checkout -- Makefile library/Makefile programs/Makefile tests/Makefile programs/fuzz/Makefile
+ ;;
+ esac
+}
+
+pre_back_up () {
+ for x in $files_to_back_up; do
+ cp -p "$x" "$x$backup_suffix"
+ done
+}
+
+pre_setup_keep_going () {
+ failure_count=0 # Number of failed components
+ last_failure_status=0 # Last failure status in this component
+
+ # See err_trap
+ previous_failure_status=0
+ previous_failed_command=
+ previous_failure_funcall_depth=0
+ unset report_failed_command
+
+ start_red=
+ end_color=
+ if [ -t 1 ]; then
+ case "${TERM:-}" in
+ *color*|cygwin|linux|rxvt*|screen|[Eex]term*)
+ start_red=$(printf '\033[31m')
+ end_color=$(printf '\033[0m')
+ ;;
+ esac
+ fi
+
+ # Keep a summary of failures in a file. We'll print it out at the end.
+ failure_summary_file=$PWD/all-sh-failures-$$.log
+ : >"$failure_summary_file"
+
+ # Whether it makes sense to keep a component going after the specified
+ # command fails (test command) or not (configure or build).
+ # This function normally receives the failing simple command
+ # ($BASH_COMMAND) as an argument, but if $report_failed_command is set,
+ # this is passed instead.
+ # This doesn't have to be 100% accurate: all failures are recorded anyway.
+ # False positives result in running things that can't be expected to
+ # work. False negatives result in things not running after something else
+ # failed even though they might have given useful feedback.
+ can_keep_going_after_failure () {
+ case "$1" in
+ "msg "*) false;;
+ "cd "*) false;;
+ "diff "*) true;;
+ *make*[\ /]tests*) false;; # make tests, make CFLAGS=-I../tests, ...
+ *test*) true;; # make test, tests/stuff, env V=v tests/stuff, ...
+ *make*check*) true;;
+ "grep "*) true;;
+ "[ "*) true;;
+ "! "*) true;;
+ *) false;;
+ esac
+ }
+
+ # This function runs if there is any error in a component.
+ # It must either exit with a nonzero status, or set
+ # last_failure_status to a nonzero value.
+ err_trap () {
+ # Save $? (status of the failing command). This must be the very
+ # first thing, before $? is overridden.
+ last_failure_status=$?
+ failed_command=${report_failed_command-$BASH_COMMAND}
+
+ if [[ $last_failure_status -eq $previous_failure_status &&
+ "$failed_command" == "$previous_failed_command" &&
+ ${#FUNCNAME[@]} == $((previous_failure_funcall_depth - 1)) ]]
+ then
+ # The same command failed twice in a row, but this time one level
+ # less deep in the function call stack. This happens when the last
+ # command of a function returns a nonzero status, and the function
+ # returns that same status. Ignore the second failure.
+ previous_failure_funcall_depth=${#FUNCNAME[@]}
+ return
+ fi
+ previous_failure_status=$last_failure_status
+ previous_failed_command=$failed_command
+ previous_failure_funcall_depth=${#FUNCNAME[@]}
+
+ text="$current_section: $failed_command -> $last_failure_status"
+ echo "${start_red}^^^^$text^^^^${end_color}" >&2
+ echo "$text" >>"$failure_summary_file"
+
+ # If the command is fatal (configure or build command), stop this
+ # component. Otherwise (test command) keep the component running
+ # (run more tests from the same build).
+ if ! can_keep_going_after_failure "$failed_command"; then
+ exit $last_failure_status
+ fi
+ }
+
+ final_report () {
+ if [ $failure_count -gt 0 ]; then
+ echo
+ echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+ echo "${start_red}FAILED: $failure_count components${end_color}"
+ cat "$failure_summary_file"
+ echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
+ elif [ -z "${1-}" ]; then
+ echo "SUCCESS :)"
+ fi
+ if [ -n "${1-}" ]; then
+ echo "Killed by SIG$1."
+ fi
+ rm -f "$failure_summary_file"
+ if [ $failure_count -gt 0 ]; then
+ exit 1
+ fi
+ }
+}
+
+# record_status() and if_build_succeeded() are kept temporarily for backward
+# compatibility. Don't use them in new components.
+record_status () {
+ "$@"
+}
+if_build_succeeded () {
+ "$@"
+}
+
+# '! true' does not trigger the ERR trap. Arrange to trigger it, with
+# a reasonably informative error message (not just "$@").
+not () {
+ if "$@"; then
+ report_failed_command="! $*"
+ false
+ unset report_failed_command
+ fi
+}
+
+pre_prepare_outcome_file () {
+ case "$MBEDTLS_TEST_OUTCOME_FILE" in
+ [!/]*) MBEDTLS_TEST_OUTCOME_FILE="$PWD/$MBEDTLS_TEST_OUTCOME_FILE";;
+ esac
+ if [ -n "$MBEDTLS_TEST_OUTCOME_FILE" ] && [ "$append_outcome" -eq 0 ]; then
+ rm -f "$MBEDTLS_TEST_OUTCOME_FILE"
+ fi
+}
+
+pre_print_configuration () {
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
+ msg "info: $0 configuration"
+ echo "MEMORY: $MEMORY"
+ echo "FORCE: $FORCE"
+ echo "MBEDTLS_TEST_OUTCOME_FILE: ${MBEDTLS_TEST_OUTCOME_FILE:-(none)}"
+ echo "SEED: ${SEED-"UNSET"}"
+ echo
+ echo "OPENSSL: $OPENSSL"
+ echo "OPENSSL_NEXT: $OPENSSL_NEXT"
+ echo "GNUTLS_CLI: $GNUTLS_CLI"
+ echo "GNUTLS_SERV: $GNUTLS_SERV"
+ echo "ARMC5_BIN_DIR: $ARMC5_BIN_DIR"
+ echo "ARMC6_BIN_DIR: $ARMC6_BIN_DIR"
+}
+
+# Make sure the tools we need are available.
+pre_check_tools () {
+ # Build the list of variables to pass to output_env.sh.
+ set env
+
+ case " $RUN_COMPONENTS " in
+ # Require OpenSSL and GnuTLS if running any tests (as opposed to
+ # only doing builds). Not all tests run OpenSSL and GnuTLS, but this
+ # is a good enough approximation in practice.
+ *" test_"* | *" release_test_"*)
+ # To avoid setting OpenSSL and GnuTLS for each call to compat.sh
+ # and ssl-opt.sh, we just export the variables they require.
+ export OPENSSL="$OPENSSL"
+ export GNUTLS_CLI="$GNUTLS_CLI"
+ export GNUTLS_SERV="$GNUTLS_SERV"
+ # Avoid passing --seed flag in every call to ssl-opt.sh
+ if [ -n "${SEED-}" ]; then
+ export SEED
+ fi
+ set "$@" OPENSSL="$OPENSSL"
+ set "$@" GNUTLS_CLI="$GNUTLS_CLI" GNUTLS_SERV="$GNUTLS_SERV"
+ check_tools "$OPENSSL" "$OPENSSL_NEXT" \
+ "$GNUTLS_CLI" "$GNUTLS_SERV"
+ ;;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_doxygen[_\ ]*) check_tools "doxygen" "dot";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_arm_none_eabi_gcc[_\ ]*) check_tools "${ARM_NONE_EABI_GCC_PREFIX}gcc";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_mingw[_\ ]*) check_tools "i686-w64-mingw32-gcc";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *" test_zeroize "*) check_tools "gdb";;
+ esac
+
+ case " $RUN_COMPONENTS " in
+ *_armcc*)
+ ARMC5_CC="$ARMC5_BIN_DIR/armcc"
+ ARMC5_AR="$ARMC5_BIN_DIR/armar"
+ ARMC5_FROMELF="$ARMC5_BIN_DIR/fromelf"
+ ARMC6_CC="$ARMC6_BIN_DIR/armclang"
+ ARMC6_AR="$ARMC6_BIN_DIR/armar"
+ ARMC6_FROMELF="$ARMC6_BIN_DIR/fromelf"
+ check_tools "$ARMC5_CC" "$ARMC5_AR" "$ARMC5_FROMELF" \
+ "$ARMC6_CC" "$ARMC6_AR" "$ARMC6_FROMELF";;
+ esac
+
+ # past this point, no call to check_tool, only printing output
+ if [ $QUIET -eq 1 ]; then
+ return
+ fi
+
+ msg "info: output_env.sh"
+ case $RUN_COMPONENTS in
+ *_armcc*)
+ set "$@" ARMC5_CC="$ARMC5_CC" ARMC6_CC="$ARMC6_CC" RUN_ARMCC=1;;
+ *) set "$@" RUN_ARMCC=0;;
+ esac
+ "$@" scripts/output_env.sh
+}
+
+pre_generate_files() {
+ # since make doesn't have proper dependencies, remove any possibly outdate
+ # file that might be around before generating fresh ones
+ make neat
+ if [ $QUIET -eq 1 ]; then
+ make generated_files >/dev/null
+ else
+ make generated_files
+ fi
+}
+
+clang_version() {
+ if command -v clang > /dev/null ; then
+ clang --version|grep version|sed -E 's#.*version ([0-9]+).*#\1#'
+ else
+ echo 0 # report version 0 for "no clang"
+ fi
+}
+
+################################################################
+#### Helpers for components using libtestdriver1
+################################################################
+
+# How to use libtestdriver1
+# -------------------------
+#
+# 1. Define the list algorithms and key types to accelerate,
+# designated the same way as PSA_WANT_ macros but without PSA_WANT_.
+# Examples:
+# - loc_accel_list="ALG_JPAKE"
+# - loc_accel_list="ALG_FFDH KEY_TYPE_DH_KEY_PAIR KEY_TYPE_DH_PUBLIC_KEY"
+# 2. Make configurations changes for the driver and/or main libraries.
+# 2a. Call helper_libtestdriver1_adjust_config <base>, where the argument
+# can be either "default" to start with the default config, or a name
+# supported by scripts/config.py (for example, "full"). This selects
+# the base to use, and makes common adjustments.
+# 2b. If desired, adjust the PSA_WANT symbols in psa/crypto_config.h.
+# These changes affect both the driver and the main libraries.
+# (Note: they need to have the same set of PSA_WANT symbols, as that
+# determines the ABI between them.)
+# 2c. Adjust MBEDTLS_ symbols in mbedtls_config.h. This only affects the
+# main libraries. Typically, you want to disable the module(s) that are
+# being accelerated. You may need to also disable modules that depend
+# on them or options that are not supported with drivers.
+# 2d. On top of psa/crypto_config.h, the driver library uses its own config
+# file: tests/include/test/drivers/config_test_driver.h. You usually
+# don't need to edit it: using loc_extra_list (see below) is preferred.
+# However, when there's no PSA symbol for what you want to enable,
+# calling scripts/config.py on this file remains the only option.
+# 3. Build the driver library, then the main libraries, test, and programs.
+# 3a. Call helper_libtestdriver1_make_drivers "$loc_accel_list". You may
+# need to enable more algorithms here, typically hash algorithms when
+# accelerating some signature algorithms (ECDSA, RSAv2). This is done
+# by passing a 2nd argument listing the extra algorithms.
+# Example:
+# loc_extra_list="ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512"
+# helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+# 3b. Call helper_libtestdriver1_make_main "$loc_accel_list". Any
+# additional arguments will be passed to make: this can be useful if
+# you don't want to build everything when iterating during development.
+# Example:
+# helper_libtestdriver1_make_main "$loc_accel_list" -C tests test_suite_foo
+# 4. Run the tests you want.
+
+# Adjust the configuration - for both libtestdriver1 and main library,
+# as they should have the same PSA_WANT macros.
+#
+# $1: name of the base configuration ("default", "full", ...). Anything
+#     other than "default" is passed straight to scripts/config.py.
+# Reads $CONFIG_TEST_DRIVER_H (path to the driver library's own config file).
+helper_libtestdriver1_adjust_config() {
+    base_config=$1
+    # Select the base configuration
+    if [ "$base_config" != "default" ]; then
+        scripts/config.py "$base_config"
+    fi
+
+    # Enable PSA-based config (necessary to use drivers)
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+
+    # Dynamic secure element support is a deprecated feature and needs to be disabled here.
+    # This is done to have the same form of psa_key_attributes_s for libdriver and library.
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+
+    # If threading is enabled on the normal build, then we need to enable it in the drivers as well,
+    # otherwise we will end up running multithreaded tests without mutexes to protect them.
+    # (config.py get exits with success iff the option is currently enabled.)
+    if scripts/config.py get MBEDTLS_THREADING_C; then
+        scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_C
+    fi
+
+    if scripts/config.py get MBEDTLS_THREADING_PTHREAD; then
+        scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_THREADING_PTHREAD
+    fi
+}
+
+# When called with no parameter this function disables all builtin curves.
+# The function optionally accepts 1 parameter: a space-separated list of the
+# curves that should be kept enabled.
+helper_disable_builtin_curves() {
+ allowed_list="${1:-}"
+ scripts/config.py unset-all "MBEDTLS_ECP_DP_[0-9A-Z_a-z]*_ENABLED"
+
+ for curve in $allowed_list; do
+ scripts/config.py set $curve
+ done
+}
+
+# Print the list of elliptic curves supported by CRYPTO_CONFIG_H, i.e.
+# every uncommented PSA_WANT_ECC_xxx symbol with the "PSA_WANT_" prefix
+# stripped. Handy for accelerating curves in the helpers below.
+helper_get_psa_curve_list () {
+    loc_list=""
+    while IFS= read -r item; do
+        # Skip the single empty line produced when no curve is enabled.
+        [ -n "$item" ] || continue
+        loc_list="$loc_list $item"
+    done <<EOF
+$(sed -n 's/^#define PSA_WANT_\(ECC_[0-9A-Z_a-z]*\).*/\1/p' <"$CRYPTO_CONFIG_H")
+EOF
+
+    echo "$loc_list"
+}
+
+# Helper returning the list of supported DH groups from CRYPTO_CONFIG_H,
+# without the "PSA_WANT_" prefix. This becomes handy for accelerating DH groups
+# in the following helpers.
+helper_get_psa_dh_group_list () {
+ loc_list=""
+ for item in $(sed -n 's/^#define PSA_WANT_\(DH_RFC7919_[0-9]*\).*/\1/p' <"$CRYPTO_CONFIG_H"); do
+ loc_list="$loc_list $item"
+ done
+
+ echo "$loc_list"
+}
+
+# Get the list of uncommented PSA_WANT_KEY_TYPE_xxx_ from CRYPTO_CONFIG_H. This
+# is useful to easily get a list of key type symbols to accelerate.
+# The function accepts a single argument which is the key type: ECC, DH, RSA.
+helper_get_psa_key_type_list() {
+ key_type="$1"
+ loc_list=""
+ for item in $(sed -n "s/^#define PSA_WANT_\(KEY_TYPE_${key_type}_[0-9A-Z_a-z]*\).*/\1/p" <"$CRYPTO_CONFIG_H"); do
+ # Skip DERIVE for elliptic keys since there is no driver dispatch for
+ # it so it cannot be accelerated.
+ if [ "$item" != "KEY_TYPE_ECC_KEY_PAIR_DERIVE" ]; then
+ loc_list="$loc_list $item"
+ fi
+ done
+
+ echo "$loc_list"
+}
+
+# Build the drivers library libtestdriver1.a (with ASan).
+#
+# Parameters:
+# 1. a space-separated list of things to accelerate;
+# 2. optional: a space-separated list of things to also support.
+# Here "things" are PSA_WANT_ symbols but with PSA_WANT_ removed.
+helper_libtestdriver1_make_drivers() {
+    # Turn each word into a -DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_xxx define.
+    loc_accel_flags=$( echo "$1 ${2-}" | sed 's/[^ ]* */-DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_&/g' )
+    make CC=$ASAN_CC -C tests libtestdriver1.a CFLAGS=" $ASAN_CFLAGS $loc_accel_flags" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# Build the main libraries, programs and tests,
+# linking to the drivers library (with ASan).
+#
+# Parameters:
+# 1. a space-separated list of things to accelerate;
+# *. remaining arguments if any are passed directly to make
+#    (examples: lib, -C tests test_suite_xxx, etc.)
+# Here "things" are PSA_WANT_ symbols but with PSA_WANT_ removed.
+helper_libtestdriver1_make_main() {
+    loc_accel_list=$1
+    shift
+
+    # we need flags both with and without the LIBTESTDRIVER1_ prefix
+    loc_accel_flags=$( echo "$loc_accel_list" | sed 's/[^ ]* */-DLIBTESTDRIVER1_MBEDTLS_PSA_ACCEL_&/g' )
+    loc_accel_flags="$loc_accel_flags $( echo "$loc_accel_list" | sed 's/[^ ]* */-DMBEDTLS_PSA_ACCEL_&/g' )"
+    # -ltestdriver1 links against the library built by
+    # helper_libtestdriver1_make_drivers, which must have run first.
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -I../tests/include -I../tests -I../../tests -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_TEST_LIBTESTDRIVER1 $loc_accel_flags" LDFLAGS="-ltestdriver1 $ASAN_CFLAGS" "$@"
+}
+
+################################################################
+#### Basic checks
+################################################################
+
+#
+# Test Suites to be executed
+#
+# The test ordering tries to optimize for the following criteria:
+# 1. Catch possible problems early, by running first tests that run quickly
+# and/or are more likely to fail than others (eg I use Clang most of the
+# time, so start with a GCC build).
+# 2. Minimize total running time, by avoiding useless rebuilds
+#
+# Indicative running times are given for reference.
+
+# Flag any (accidental) recursion in the library's C sources.
+component_check_recursion () {
+    msg "Check: recursion.pl" # < 1s
+    tests/scripts/recursion.pl library/*.c
+}
+
+# Check that the committed generated files are up to date, both when
+# regenerated via make and when regenerated from scratch (-u).
+component_check_generated_files () {
+    msg "Check: check-generated-files, files generated with make" # 2s
+    make generated_files
+    tests/scripts/check-generated-files.sh
+
+    msg "Check: check-generated-files -u, files present" # 2s
+    tests/scripts/check-generated-files.sh -u
+    # Check that the generated files are considered up to date.
+    tests/scripts/check-generated-files.sh
+
+    msg "Check: check-generated-files -u, files absent" # 2s
+    # 'command' bypasses any shell function that may shadow make.
+    command make neat
+    tests/scripts/check-generated-files.sh -u
+    # Check that the generated files are considered up to date.
+    tests/scripts/check-generated-files.sh
+
+    # This component ends with the generated files present in the source tree.
+    # This is necessary for subsequent components!
+}
+
+# Detect doxygen markup used outside doxygen comment blocks, where it
+# would be silently ignored by the documentation build.
+component_check_doxy_blocks () {
+    msg "Check: doxygen markup outside doxygen blocks" # < 1s
+    tests/scripts/check-doxy-blocks.pl
+}
+
+# Run the repository-wide file sanity checker (permissions, encodings,
+# line endings, trailing whitespace, ...).
+component_check_files () {
+    msg "Check: file sanity checks (permissions, encodings)" # < 1s
+    tests/scripts/check_files.py
+}
+
+# Check that the pending changelog entries assemble cleanly. The
+# assembled result is only shown for information, never treated as an
+# error.
+component_check_changelog () {
+    msg "Check: changelog entries" # < 1s
+    rm -f ChangeLog.new
+    scripts/assemble_changelog.py -o ChangeLog.new
+    if [ -e ChangeLog.new ]; then
+        # Show the diff for information. It isn't an error if the diff is
+        # non-empty.
+        diff -u ChangeLog ChangeLog.new || true
+        rm ChangeLog.new
+    fi
+}
+
+# Check declared and exported names against the library's naming rules.
+# Note: this check needs to build the library first.
+component_check_names () {
+    msg "Check: declared and exported names (builds the library)" # < 3s
+    tests/scripts/check_names.py -v
+}
+
+component_check_test_cases () {
+ msg "Check: test case descriptions" # < 1s
+ if [ $QUIET -eq 1 ]; then
+ opt='--quiet'
+ else
+ opt=''
+ fi
+ tests/scripts/check_test_cases.py -q $opt
+ unset opt
+}
+
+component_check_test_dependencies () {
+    msg "Check: test case dependencies: legacy vs PSA" # < 1s
+    # The purpose of this component is to catch unjustified dependencies on
+    # legacy feature macros (MBEDTLS_xxx) in PSA tests. Generally speaking,
+    # PSA test should use PSA feature macros (PSA_WANT_xxx, more rarely
+    # MBEDTLS_PSA_xxx).
+    #
+    # Most of the time, use of legacy MBEDTLS_xxx macros are mistakes, which
+    # this component is meant to catch. However a few of them are justified,
+    # mostly by the absence of a PSA equivalent, so this component includes a
+    # list of expected exceptions.
+
+    # Temporary files; $$ (the shell's PID) keeps the names unique per run.
+    found="check-test-deps-found-$$"
+    expected="check-test-deps-expected-$$"
+
+    # Find legacy dependencies in PSA tests: extract every (possibly
+    # negated) MBEDTLS_xxx token from depends_on lines.
+    grep 'depends_on' \
+        tests/suites/test_suite_psa*.data tests/suites/test_suite_psa*.function |
+        grep -Eo '!?MBEDTLS_[^: ]*' |
+        grep -v -e MBEDTLS_PSA_ -e MBEDTLS_TEST_ |
+        sort -u > $found
+
+    # Expected ones with justification - keep in sorted order by ASCII table!
+    rm -f $expected
+    # No PSA equivalent - WANT_KEY_TYPE_AES means all sizes
+    echo "!MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH" >> $expected
+    # No PSA equivalent - used to skip decryption tests in PSA-ECB, CBC/XTS/NIST_KW/DES
+    echo "!MBEDTLS_BLOCK_CIPHER_NO_DECRYPT" >> $expected
+    # MBEDTLS_ASN1_WRITE_C is used by import_rsa_made_up() in test_suite_psa_crypto
+    # in order to build a fake RSA key of the wanted size based on
+    # PSA_VENDOR_RSA_MAX_KEY_BITS. The legacy module is only used by
+    # the test code and that's probably the most convenient way of achieving
+    # the test's goal.
+    echo "MBEDTLS_ASN1_WRITE_C" >> $expected
+    # No PSA equivalent - we should probably have one in the future.
+    echo "MBEDTLS_ECP_RESTARTABLE" >> $expected
+    # No PSA equivalent - needed by some init tests
+    echo "MBEDTLS_ENTROPY_NV_SEED" >> $expected
+    # No PSA equivalent - required to run threaded tests.
+    echo "MBEDTLS_THREADING_PTHREAD" >> $expected
+
+    # Compare reality with expectation.
+    # We want an exact match, to ensure the above list remains up-to-date.
+    #
+    # The output should be empty. When it's not:
+    # - Each '+' line is a macro that was found but not expected. You want to
+    #   find where that macro occurs, and either replace it with PSA macros, or
+    #   add it to the exceptions list above with a justification.
+    # - Each '-' line is a macro that was expected but not found; it means the
+    #   exceptions list above should be updated by removing that macro.
+    diff -U0 $expected $found
+
+    rm $found $expected
+}
+
+# Build the doxygen documentation and fail on any doxygen warning.
+component_check_doxygen_warnings () {
+    msg "Check: doxygen warnings (builds the documentation)" # ~ 3s
+    tests/scripts/doxygen.sh
+}
+
+
+
+################################################################
+#### Build and test many configurations and targets
+################################################################
+
+# Build and test the default configuration exactly as a user would out
+# of the box: plain make, no config.py tweaks, no outcome file.
+component_test_default_out_of_box () {
+    msg "build: make, default config (out-of-box)" # ~1min
+    make
+    # Disable fancy stuff
+    unset MBEDTLS_TEST_OUTCOME_FILE
+
+    msg "test: main suites make, default config (out-of-box)" # ~10s
+    make test
+
+    msg "selftest: make, default config (out-of-box)" # ~10s
+    programs/test/selftest
+
+    msg "program demos: make, default config (out-of-box)" # ~10s
+    tests/scripts/run_demos.py
+}
+
+# Default configuration built with cmake + gcc + ASan: main suites,
+# demos, metatests and the full SSL test scripts.
+component_test_default_cmake_gcc_asan () {
+    msg "build: cmake, gcc, ASan" # ~ 1 min 50s
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "program demos (ASan build)" # ~10s
+    tests/scripts/run_demos.py
+
+    msg "test: selftest (ASan build)" # ~ 10s
+    programs/test/selftest
+
+    msg "test: metatests (GCC, ASan build)"
+    tests/scripts/run-metatests.sh any asan poison
+
+    msg "test: ssl-opt.sh (ASan build)" # ~ 1 min
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh (ASan build)" # ~ 6 min
+    tests/compat.sh
+
+    msg "test: context-info.sh (ASan build)" # ~ 15 sec
+    tests/context-info.sh
+}
+
+# Same as component_test_default_cmake_gcc_asan, but with the new bignum
+# interface (MBEDTLS_ECP_WITH_MPI_UINT) enabled.
+component_test_default_cmake_gcc_asan_new_bignum () {
+    msg "build: cmake, gcc, ASan" # ~ 1 min 50s
+    scripts/config.py set MBEDTLS_ECP_WITH_MPI_UINT
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "test: selftest (ASan build)" # ~ 10s
+    programs/test/selftest
+
+    msg "test: ssl-opt.sh (ASan build)" # ~ 1 min
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh (ASan build)" # ~ 6 min
+    tests/compat.sh
+
+    msg "test: context-info.sh (ASan build)" # ~ 15 sec
+    tests/context-info.sh
+}
+
+# Full configuration built with cmake + gcc + ASan: main suites and the
+# full SSL test scripts.
+component_test_full_cmake_gcc_asan () {
+    msg "build: full config, cmake, gcc, ASan"
+    scripts/config.py full
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (inc. selftests) (full config, ASan build)"
+    make test
+
+    msg "test: selftest (ASan build)" # ~ 10s
+    programs/test/selftest
+
+    msg "test: ssl-opt.sh (full config, ASan build)"
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh (full config, ASan build)"
+    tests/compat.sh
+
+    msg "test: context-info.sh (full config, ASan build)" # ~ 15 sec
+    tests/context-info.sh
+}
+
+
+# Same as component_test_full_cmake_gcc_asan, but with the new bignum
+# interface (MBEDTLS_ECP_WITH_MPI_UINT) enabled.
+component_test_full_cmake_gcc_asan_new_bignum () {
+    msg "build: full config, cmake, gcc, ASan"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_ECP_WITH_MPI_UINT
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (inc. selftests) (full config, ASan build)"
+    make test
+
+    msg "test: selftest (ASan build)" # ~ 10s
+    programs/test/selftest
+
+    msg "test: ssl-opt.sh (full config, ASan build)"
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh (full config, ASan build)"
+    tests/compat.sh
+
+    msg "test: context-info.sh (full config, ASan build)" # ~ 15 sec
+    tests/context-info.sh
+}
+
+# Full configuration with key identifiers that encode an owner
+# (MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER), as used by an SPM-like setup.
+component_test_psa_crypto_key_id_encodes_owner () {
+    msg "build: full config + PSA_CRYPTO_KEY_ID_ENCODES_OWNER, cmake, gcc, ASan"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: full config - USE_PSA_CRYPTO + PSA_CRYPTO_KEY_ID_ENCODES_OWNER, cmake, gcc, ASan"
+    make test
+}
+
+# Full configuration with MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS, i.e.
+# without the buffer-overlap protection copies in the PSA core.
+component_test_psa_assume_exclusive_buffers () {
+    msg "build: full config + MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS, cmake, gcc, ASan"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: full config + MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS, cmake, gcc, ASan"
+    make test
+}
+
+# check_renamed_symbols HEADER LIB
+# Check that if HEADER contains '#define MACRO ...' then MACRO is not a symbol
+# name in LIB.
+check_renamed_symbols () {
+    # Extract every macro name defined in HEADER ($1) and make sure none of
+    # them appears verbatim among the symbol names of LIB ($2). The leading
+    # '!' inverts grep's status: success means no match was found.
+    ! nm "$2" | sed 's/.* //' |
+    grep -x -F "$(sed -n 's/^ *# *define *\([A-Z_a-z][0-9A-Z_a-z]*\)..*/\1/p' "$1")"
+}
+
+# Build (compile only) the library as a Secure Partition Manager service
+# (MBEDTLS_PSA_CRYPTO_SPM), then verify that crypto_spe.h's renames took
+# effect.
+component_build_psa_crypto_spm () {
+    msg "build: full config + PSA_CRYPTO_KEY_ID_ENCODES_OWNER + PSA_CRYPTO_SPM, make, gcc"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_SPM
+    # We can only compile, not link, since our test and sample programs
+    # aren't equipped for the modified names used when MBEDTLS_PSA_CRYPTO_SPM
+    # is active.
+    make CC=gcc CFLAGS='-Werror -Wall -Wextra -I../tests/include/spe' lib
+
+    # Check that if a symbol is renamed by crypto_spe.h, the non-renamed
+    # version is not present.
+    echo "Checking for renamed symbols in the library"
+    check_renamed_symbols tests/include/spe/crypto_spe.h library/libmbedcrypto.a
+}
+
+# Get a list of library-wise undefined symbols and ensure that they only
+# belong to psa_xxx() functions and not to mbedtls_yyy() ones.
+# This function is a common helper used by both:
+# - component_test_default_psa_crypto_client_without_crypto_provider
+# - component_build_full_psa_crypto_client_without_crypto_provider.
+common_check_mbedtls_missing_symbols() {
+    # Defined symbols: nm types T (text), R/r (read-only data), D (data),
+    # C (common). Undefined references have type U.
+    nm library/libmbedcrypto.a | grep ' [TRrDC] ' | grep -Eo '(mbedtls_|psa_).*' | sort -u > sym_def.txt
+    nm library/libmbedcrypto.a | grep ' U ' | grep -Eo '(mbedtls_|psa_).*' | sort -u > sym_undef.txt
+    # comm -13: keep only lines unique to sym_undef.txt, i.e. symbols that
+    # are referenced but defined nowhere in the library.
+    comm sym_def.txt sym_undef.txt -13 > linking_errors.txt
+    not grep mbedtls_ linking_errors.txt
+
+    rm sym_def.txt sym_undef.txt linking_errors.txt
+}
+
+# Default configuration built as a pure PSA crypto client: no local
+# crypto provider (PSA_CRYPTO_C disabled), only PSA_CRYPTO_CLIENT.
+component_test_default_psa_crypto_client_without_crypto_provider () {
+    msg "build: default config - PSA_CRYPTO_C + PSA_CRYPTO_CLIENT"
+
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+    scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_CLIENT
+    scripts/config.py unset MBEDTLS_LMS_C
+
+    make
+
+    msg "check missing symbols: default config - PSA_CRYPTO_C + PSA_CRYPTO_CLIENT"
+    common_check_mbedtls_missing_symbols
+
+    msg "test: default config - PSA_CRYPTO_C + PSA_CRYPTO_CLIENT"
+    make test
+}
+
+# Full configuration built as a pure PSA crypto client (no local crypto
+# provider). Build-only: the symbol checks stand in for running tests.
+component_build_full_psa_crypto_client_without_crypto_provider () {
+    msg "build: full config - PSA_CRYPTO_C"
+
+    # Use full config which includes USE_PSA and CRYPTO_CLIENT.
+    scripts/config.py full
+
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+    # Dynamic secure element support is a deprecated feature and it is not
+    # available when CRYPTO_C and PSA_CRYPTO_STORAGE_C are disabled.
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+
+    # Since there is no crypto provider in this build it is not possible to
+    # build all the test executables and programs due to missing PSA functions
+    # at link time. Therefore we will just build libraries and we'll check
+    # that symbols of interest are there.
+    make lib
+
+    msg "check missing symbols: full config - PSA_CRYPTO_C"
+
+    common_check_mbedtls_missing_symbols
+
+    # Ensure that desired functions are included into the build (extend the
+    # following list as required).
+    grep mbedtls_pk_get_psa_attributes library/libmbedcrypto.a
+    grep mbedtls_pk_import_into_psa library/libmbedcrypto.a
+    grep mbedtls_pk_copy_from_psa library/libmbedcrypto.a
+}
+
+# Default configuration without prime generation (MBEDTLS_GENPRIME), so
+# RSA key generation is not available.
+component_test_psa_crypto_rsa_no_genprime() {
+    msg "build: default config minus MBEDTLS_GENPRIME"
+    scripts/config.py unset MBEDTLS_GENPRIME
+    make
+
+    msg "test: default config minus MBEDTLS_GENPRIME"
+    make test
+}
+
+# Build and test each of the reference configurations (configs/*.h)
+# with an ASan build driven by test-ref-configs.pl.
+component_test_ref_configs () {
+    msg "test/build: ref-configs (ASan build)" # ~ 6 min 20s
+    # test-ref-configs works by overwriting mbedtls_config.h; this makes cmake
+    # want to re-generate generated files that depend on it, quite correctly.
+    # However this doesn't work as the generation script expects a specific
+    # format for mbedtls_config.h, which the other files don't follow. Also,
+    # cmake can't know this, but re-generation is actually not necessary as
+    # the generated files only depend on the list of available options, not
+    # whether they're on or off. So, disable cmake's (over-sensitive here)
+    # dependency resolution for generated files and just rely on them being
+    # present (thanks to pre_generate_files) by turning GEN_FILES off.
+    CC=$ASAN_CC cmake -D GEN_FILES=Off -D CMAKE_BUILD_TYPE:String=Asan .
+    tests/scripts/test-ref-configs.pl
+}
+
+# Default configuration without TLS renegotiation support.
+component_test_no_renegotiation () {
+    msg "build: Default + !MBEDTLS_SSL_RENEGOTIATION (ASan build)" # ~ 6 min
+    scripts/config.py unset MBEDTLS_SSL_RENEGOTIATION
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: !MBEDTLS_SSL_RENEGOTIATION - main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "test: !MBEDTLS_SSL_RENEGOTIATION - ssl-opt.sh (ASan build)" # ~ 6 min
+    tests/ssl-opt.sh
+}
+
+# Default configuration without PEM parsing and without any filesystem
+# access (and therefore without file-based PSA storage).
+component_test_no_pem_no_fs () {
+    msg "build: Default + !MBEDTLS_PEM_PARSE_C + !MBEDTLS_FS_IO (ASan build)"
+    scripts/config.py unset MBEDTLS_PEM_PARSE_C
+    scripts/config.py unset MBEDTLS_FS_IO
+    scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C # requires a filesystem
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C # requires PSA ITS
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: !MBEDTLS_PEM_PARSE_C !MBEDTLS_FS_IO - main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "test: !MBEDTLS_PEM_PARSE_C !MBEDTLS_FS_IO - ssl-opt.sh (ASan build)" # ~ 6 min
+    tests/ssl-opt.sh
+}
+
+# Default configuration with RSA implemented without the Chinese
+# Remainder Theorem optimization (MBEDTLS_RSA_NO_CRT).
+component_test_rsa_no_crt () {
+    msg "build: Default + RSA_NO_CRT (ASan build)" # ~ 6 min
+    scripts/config.py set MBEDTLS_RSA_NO_CRT
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: RSA_NO_CRT - main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "test: RSA_NO_CRT - RSA-related part of ssl-opt.sh (ASan build)" # ~ 5s
+    tests/ssl-opt.sh -f RSA
+
+    msg "test: RSA_NO_CRT - RSA-related part of compat.sh (ASan build)" # ~ 3 min
+    tests/compat.sh -t RSA
+
+    msg "test: RSA_NO_CRT - RSA-related part of context-info.sh (ASan build)" # ~ 15 sec
+    tests/context-info.sh
+}
+
+# Full configuration without CTR_DRBG, with TLS using legacy (non-PSA)
+# crypto, so HMAC_DRBG serves as the RNG throughout.
+component_test_no_ctr_drbg_classic () {
+    msg "build: Full minus CTR_DRBG, classic crypto in TLS"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_CTR_DRBG_C
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: Full minus CTR_DRBG, classic crypto - main suites"
+    make test
+
+    # In this configuration, the TLS test programs use HMAC_DRBG.
+    # The SSL tests are slow, so run a small subset, just enough to get
+    # confidence that the SSL code copes with HMAC_DRBG.
+    msg "test: Full minus CTR_DRBG, classic crypto - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default\|SSL async private.*delay=\|tickets enabled on server'
+
+    msg "test: Full minus CTR_DRBG, classic crypto - compat.sh (subset)"
+    tests/compat.sh -m tls12 -t 'ECDSA PSK' -V NO -p OpenSSL
+}
+
+# Full configuration without CTR_DRBG, with TLS dispatching crypto to
+# PSA (USE_PSA_CRYPTO), so HMAC_DRBG serves as the RNG throughout.
+component_test_no_ctr_drbg_use_psa () {
+    msg "build: Full minus CTR_DRBG, PSA crypto in TLS"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_CTR_DRBG_C
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - main suites"
+    make test
+
+    # In this configuration, the TLS test programs use HMAC_DRBG.
+    # The SSL tests are slow, so run a small subset, just enough to get
+    # confidence that the SSL code copes with HMAC_DRBG.
+    msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default\|SSL async private.*delay=\|tickets enabled on server'
+
+    msg "test: Full minus CTR_DRBG, USE_PSA_CRYPTO - compat.sh (subset)"
+    tests/compat.sh -m tls12 -t 'ECDSA PSK' -V NO -p OpenSSL
+}
+
+# Full configuration without HMAC_DRBG (hence without deterministic
+# ECDSA), with TLS using legacy (non-PSA) crypto.
+component_test_no_hmac_drbg_classic () {
+    msg "build: Full minus HMAC_DRBG, classic crypto in TLS"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+    scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: Full minus HMAC_DRBG, classic crypto - main suites"
+    make test
+
+    # Normally our ECDSA implementation uses deterministic ECDSA. But since
+    # HMAC_DRBG is disabled in this configuration, randomized ECDSA is used
+    # instead.
+    # Test SSL with non-deterministic ECDSA. Only test features that
+    # might be affected by how ECDSA signature is performed.
+    msg "test: Full minus HMAC_DRBG, classic crypto - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default\|SSL async private: sign'
+
+    # To save time, only test one protocol version, since this part of
+    # the protocol is identical in (D)TLS up to 1.2.
+    msg "test: Full minus HMAC_DRBG, classic crypto - compat.sh (ECDSA)"
+    tests/compat.sh -m tls12 -t 'ECDSA'
+}
+
+# Full configuration without HMAC_DRBG (hence without deterministic
+# ECDSA), with TLS dispatching crypto to PSA (USE_PSA_CRYPTO).
+component_test_no_hmac_drbg_use_psa () {
+    msg "build: Full minus HMAC_DRBG, PSA crypto in TLS"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+    scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - main suites"
+    make test
+
+    # Normally our ECDSA implementation uses deterministic ECDSA. But since
+    # HMAC_DRBG is disabled in this configuration, randomized ECDSA is used
+    # instead.
+    # Test SSL with non-deterministic ECDSA. Only test features that
+    # might be affected by how ECDSA signature is performed.
+    msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default\|SSL async private: sign'
+
+    # To save time, only test one protocol version, since this part of
+    # the protocol is identical in (D)TLS up to 1.2.
+    msg "test: Full minus HMAC_DRBG, USE_PSA_CRYPTO - compat.sh (ECDSA)"
+    tests/compat.sh -m tls12 -t 'ECDSA'
+}
+
+# Use an external RNG (MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG) with all DRBG
+# modules and entropy sources disabled; TLS uses legacy (non-PSA) crypto.
+component_test_psa_external_rng_no_drbg_classic () {
+    msg "build: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto in TLS"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+    scripts/config.py unset MBEDTLS_ENTROPY_C
+    scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+    scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+    scripts/config.py unset MBEDTLS_CTR_DRBG_C
+    scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+    scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+    # When MBEDTLS_USE_PSA_CRYPTO is disabled and there is no DRBG,
+    # the SSL test programs don't have an RNG and can't work. Explicitly
+    # make them use the PSA RNG with -DMBEDTLS_TEST_USE_PSA_CRYPTO_RNG.
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DMBEDTLS_TEST_USE_PSA_CRYPTO_RNG" LDFLAGS="$ASAN_CFLAGS"
+
+    msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto - main suites"
+    make test
+
+    msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, classic crypto - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default'
+}
+
+# Use an external RNG (MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG) with all DRBG
+# modules and entropy sources disabled; TLS dispatches crypto to PSA.
+component_test_psa_external_rng_no_drbg_use_psa () {
+    msg "build: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto in TLS"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+    scripts/config.py unset MBEDTLS_ENTROPY_C
+    scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
+    scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+    scripts/config.py unset MBEDTLS_CTR_DRBG_C
+    scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+    scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # requires HMAC_DRBG
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+    msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto - main suites"
+    make test
+
+    msg "test: PSA_CRYPTO_EXTERNAL_RNG minus *_DRBG, PSA crypto - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f 'Default\|opaque'
+}
+
+component_test_psa_external_rng_use_psa_crypto () {
+ msg "build: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG
+ scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ make test
+
+ msg "test: full + PSA_CRYPTO_EXTERNAL_RNG + USE_PSA_CRYPTO minus CTR_DRBG"
+ tests/ssl-opt.sh -f 'Default\|opaque'
+}
+
+# Full configuration with MBEDTLS_PSA_INJECT_ENTROPY: entropy comes from
+# an injected NV seed rather than from platform entropy sources.
+component_test_psa_inject_entropy () {
+    msg "build: full + MBEDTLS_PSA_INJECT_ENTROPY"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_PSA_INJECT_ENTROPY
+    scripts/config.py set MBEDTLS_ENTROPY_NV_SEED
+    scripts/config.py set MBEDTLS_NO_DEFAULT_ENTROPY_SOURCES
+    scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
+    scripts/config.py unset MBEDTLS_PLATFORM_STD_NV_SEED_READ
+    scripts/config.py unset MBEDTLS_PLATFORM_STD_NV_SEED_WRITE
+    # The user config file provides the test implementations of the NV seed
+    # read/write functions.
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS '-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/user-config-for-test.h\"'" LDFLAGS="$ASAN_CFLAGS"
+
+    msg "test: full + MBEDTLS_PSA_INJECT_ENTROPY"
+    make test
+}
+
+# Default configuration exercising the software fallback implementation
+# of inet_pton (MBEDTLS_TEST_SW_INET_PTON).
+component_test_sw_inet_pton () {
+    msg "build: default plus MBEDTLS_TEST_SW_INET_PTON"
+
+    # MBEDTLS_TEST_HOOKS required for x509_crt_parse_cn_inet_pton
+    scripts/config.py set MBEDTLS_TEST_HOOKS
+    make CFLAGS="-DMBEDTLS_TEST_SW_INET_PTON"
+
+    msg "test: default plus MBEDTLS_TEST_SW_INET_PTON"
+    make test
+}
+
+# Crypto-full configuration without the PK parse/write modules.
+component_full_no_pkparse_pkwrite() {
+    msg "build: full without pkparse and pkwrite"
+
+    scripts/config.py crypto_full
+    scripts/config.py unset MBEDTLS_PK_PARSE_C
+    scripts/config.py unset MBEDTLS_PK_WRITE_C
+
+    make CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+    # Ensure that PK_[PARSE|WRITE]_C were not re-enabled accidentally (additive config).
+    not grep mbedtls_pk_parse_key library/pkparse.o
+    not grep mbedtls_pk_write_key_der library/pkwrite.o
+
+    msg "test: full without pkparse and pkwrite"
+    make test
+}
+
+# Crypto-full configuration with only the light subset of the MD module
+# (hashes but no HMAC), checking nothing pulls full MD_C back in.
+component_test_crypto_full_md_light_only () {
+    msg "build: crypto_full with only the light subset of MD"
+    scripts/config.py crypto_full
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_CONFIG
+    # Disable MD
+    scripts/config.py unset MBEDTLS_MD_C
+    # Disable direct dependencies of MD_C
+    scripts/config.py unset MBEDTLS_HKDF_C
+    scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    # Disable indirect dependencies of MD_C
+    scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC # needs HMAC_DRBG
+    # Disable things that would auto-enable MD_C
+    scripts/config.py unset MBEDTLS_PKCS5_C
+
+    # Note: MD-light is auto-enabled in build_info.h by modules that need it,
+    # which we haven't disabled, so no need to explicitly enable it.
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+    # Make sure we don't have the HMAC functions, but the hashing functions
+    not grep mbedtls_md_hmac library/md.o
+    grep mbedtls_md library/md.o
+
+    msg "test: crypto_full with only the light subset of MD"
+    make test
+}
+
+component_test_full_no_cipher_no_psa_crypto () {
+ msg "build: full no CIPHER no PSA_CRYPTO_C"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_CIPHER_C
+ # Don't pull in cipher via PSA mechanisms
+ # (currently ignored anyway because we completely disable PSA)
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_CONFIG
+ # Disable features that depend on CIPHER_C
+ scripts/config.py unset MBEDTLS_CMAC_C
+ scripts/config.py unset MBEDTLS_NIST_KW_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_CLIENT
+ scripts/config.py unset MBEDTLS_SSL_TLS_C
+ scripts/config.py unset MBEDTLS_SSL_TICKET_C
+ # Disable features that depend on PSA_CRYPTO_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_LMS_C
+ scripts/config.py unset MBEDTLS_LMS_PRIVATE
+
+ msg "test: full no CIPHER no PSA_CRYPTO_C"
+ make test
+}
+
+# This is a common configurator and test function that is used in:
+# - component_test_full_no_cipher_with_psa_crypto
+# - component_test_full_no_cipher_with_psa_crypto_config
+# It accepts 2 input parameters:
+# - $1: boolean value which basically reflects status of MBEDTLS_PSA_CRYPTO_CONFIG
+# - $2: a text string which describes the test component
+common_test_full_no_cipher_with_psa_crypto () {
+    USE_CRYPTO_CONFIG="$1"
+    COMPONENT_DESCRIPTION="$2"
+
+    msg "build: $COMPONENT_DESCRIPTION"
+
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_CIPHER_C
+
+    if [ "$USE_CRYPTO_CONFIG" -eq 1 ]; then
+        # The built-in implementation of the following algs/key-types depends
+        # on CIPHER_C so we disable them.
+        # This does not hold for KEY_TYPE_CHACHA20 and ALG_CHACHA20_POLY1305
+        # so we keep them enabled.
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CMAC
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CBC_NO_PADDING
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CBC_PKCS7
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CFB
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_CTR
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_ECB_NO_PADDING
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_OFB
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_ALG_STREAM_CIPHER
+        scripts/config.py -f $CRYPTO_CONFIG_H unset PSA_WANT_KEY_TYPE_DES
+    else
+        # Don't pull in cipher via PSA mechanisms
+        scripts/config.py unset MBEDTLS_PSA_CRYPTO_CONFIG
+        # Disable cipher modes/keys that make PSA depend on CIPHER_C.
+        # Keep CHACHA20 and CHACHAPOLY enabled since they do not depend on CIPHER_C.
+        scripts/config.py unset-all MBEDTLS_CIPHER_MODE
+    fi
+    # The following modules directly depend on CIPHER_C
+    scripts/config.py unset MBEDTLS_CMAC_C
+    scripts/config.py unset MBEDTLS_NIST_KW_C
+
+    make
+
+    # Ensure that CIPHER_C was not re-enabled
+    not grep mbedtls_cipher_init library/cipher.o
+
+    msg "test: $COMPONENT_DESCRIPTION"
+    make test
+}
+
+# Variant without MBEDTLS_PSA_CRYPTO_CONFIG (legacy feature selection).
+component_test_full_no_cipher_with_psa_crypto() {
+    common_test_full_no_cipher_with_psa_crypto 0 "full no CIPHER no CRYPTO_CONFIG"
+}
+
+# Variant with MBEDTLS_PSA_CRYPTO_CONFIG (PSA_WANT-based feature selection).
+component_test_full_no_cipher_with_psa_crypto_config() {
+    common_test_full_no_cipher_with_psa_crypto 1 "full no CIPHER"
+}
+
+# Full configuration with the CCM AEAD disabled on the PSA side only.
+component_test_full_no_ccm() {
+    msg "build: full no PSA_WANT_ALG_CCM"
+
+    # Full config enables:
+    # - USE_PSA_CRYPTO so that TLS code dispatches cipher/AEAD to PSA
+    # - CRYPTO_CONFIG so that PSA_WANT config symbols are evaluated
+    scripts/config.py full
+
+    # Disable PSA_WANT_ALG_CCM so that CCM is not supported in PSA. CCM_C is still
+    # enabled, but not used from TLS since USE_PSA is set.
+    # This is helpful to ensure that TLS tests below have proper dependencies.
+    #
+    # Note: also PSA_WANT_ALG_CCM_STAR_NO_TAG is enabled, but it does not cause
+    # PSA_WANT_ALG_CCM to be re-enabled.
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CCM
+
+    make
+
+    msg "test: full no PSA_WANT_ALG_CCM"
+    make test
+}
+
+# Full configuration with every PSA unauthenticated cipher disabled
+# (CCM* without tag included), keeping only the AEADs.
+component_test_full_no_ccm_star_no_tag() {
+    msg "build: full no PSA_WANT_ALG_CCM_STAR_NO_TAG"
+
+    # Full config enables CRYPTO_CONFIG so that PSA_WANT config symbols are evaluated
+    scripts/config.py full
+
+    # Disable CCM_STAR_NO_TAG, which is the target of this test, as well as all
+    # other components that enable MBEDTLS_PSA_BUILTIN_CIPHER internal symbol.
+    # This basically disables all unauthenticated ciphers on the PSA side, while
+    # keeping AEADs enabled.
+    #
+    # Note: PSA_WANT_ALG_CCM is enabled, but it does not cause
+    # PSA_WANT_ALG_CCM_STAR_NO_TAG to be re-enabled.
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_STREAM_CIPHER
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CTR
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CFB
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_OFB
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_ECB_NO_PADDING
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_NO_PADDING
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_PKCS7
+
+    make
+
+    # Ensure MBEDTLS_PSA_BUILTIN_CIPHER was not enabled
+    not grep mbedtls_psa_cipher library/psa_crypto_cipher.o
+
+    msg "test: full no PSA_WANT_ALG_CCM_STAR_NO_TAG"
+    make test
+}
+
+# Build and test the full config with MBEDTLS_BIGNUM_C disabled, together
+# with everything that depends on it (ECP, RSA, DHM, PK, X.509, and the
+# key exchanges / TLS options that rely on those).
+component_test_full_no_bignum () {
+    msg "build: full minus bignum"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_BIGNUM_C
+    # Direct dependencies of bignum
+    scripts/config.py unset MBEDTLS_ECP_C
+    scripts/config.py unset MBEDTLS_RSA_C
+    scripts/config.py unset MBEDTLS_DHM_C
+    # Direct dependencies of ECP
+    scripts/config.py unset MBEDTLS_ECDH_C
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_ECJPAKE_C
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+    # Disable what auto-enables ECP_LIGHT
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_EXTENDED
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_COMPRESSED
+    # Indirect dependencies of ECP
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED
+    # Direct dependencies of DHM
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED
+    # Direct dependencies of RSA
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    # PK and its dependencies
+    scripts/config.py unset MBEDTLS_PK_C
+    scripts/config.py unset MBEDTLS_PK_PARSE_C
+    scripts/config.py unset MBEDTLS_PK_WRITE_C
+    scripts/config.py unset MBEDTLS_X509_USE_C
+    scripts/config.py unset MBEDTLS_X509_CRT_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_CRL_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_CSR_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_CREATE_C
+    scripts/config.py unset MBEDTLS_X509_CRT_WRITE_C
+    scripts/config.py unset MBEDTLS_X509_CSR_WRITE_C
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    scripts/config.py unset MBEDTLS_SSL_SERVER_NAME_INDICATION
+    scripts/config.py unset MBEDTLS_SSL_ASYNC_PRIVATE
+    scripts/config.py unset MBEDTLS_X509_TRUSTED_CERTIFICATE_CALLBACK
+
+    make
+
+    msg "test: full minus bignum"
+    make test
+}
+
+# TLS 1.2 with only the NULL stream "cipher" enabled: no AEAD, no CBC,
+# no TLS 1.3. Checks that the stream-cipher-only record path builds and
+# passes the main test suites (legacy, non-PSA dispatch).
+component_test_tls1_2_default_stream_cipher_only () {
+    msg "build: default with only stream cipher"
+
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Disable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Enable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py set MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only stream cipher"
+    make test
+
+    # Not running ssl-opt.sh because most tests require a non-NULL ciphersuite.
+}
+
+# Same as component_test_tls1_2_default_stream_cipher_only, but with
+# MBEDTLS_USE_PSA_CRYPTO so TLS dispatches crypto operations to PSA.
+component_test_tls1_2_default_stream_cipher_only_use_psa () {
+    msg "build: default with only stream cipher use psa"
+
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Disable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Enable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py set MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only stream cipher use psa"
+    make test
+
+    # Not running ssl-opt.sh because most tests require a non-NULL ciphersuite.
+}
+
+# TLS 1.2 with only CBC-legacy ciphersuites (no AEAD, no EtM, no NULL,
+# no TLS 1.3). Runs main suites plus the TLS 1.2 subset of ssl-opt.sh.
+component_test_tls1_2_default_cbc_legacy_cipher_only () {
+    msg "build: default with only CBC-legacy cipher"
+
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only CBC-legacy cipher"
+    make test
+
+    msg "test: default with only CBC-legacy cipher - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f "TLS 1.2"
+}
+
+# Same as component_test_tls1_2_default_cbc_legacy_cipher_only, but with
+# MBEDTLS_USE_PSA_CRYPTO so TLS dispatches crypto operations to PSA.
+# NOTE(review): "deafult" in the function name is a typo for "default".
+# Component names are auto-discovered by all.sh and form its public
+# interface, so renaming would be a breaking change — confirm before fixing.
+component_test_tls1_2_deafult_cbc_legacy_cipher_only_use_psa () {
+    msg "build: default with only CBC-legacy cipher use psa"
+
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    # Disable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py unset MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only CBC-legacy cipher use psa"
+    make test
+
+    msg "test: default with only CBC-legacy cipher use psa - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f "TLS 1.2"
+}
+
+# TLS 1.2 with CBC-legacy plus CBC Encrypt-then-MAC ciphersuites only
+# (no AEAD, no NULL, no TLS 1.3). Runs main suites plus the TLS 1.2
+# subset of ssl-opt.sh.
+component_test_tls1_2_default_cbc_legacy_cbc_etm_cipher_only () {
+    msg "build: default with only CBC-legacy and CBC-EtM ciphers"
+
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    # Enable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py set MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only CBC-legacy and CBC-EtM ciphers"
+    make test
+
+    msg "test: default with only CBC-legacy and CBC-EtM ciphers - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f "TLS 1.2"
+}
+
+# Same as component_test_tls1_2_default_cbc_legacy_cbc_etm_cipher_only,
+# but with MBEDTLS_USE_PSA_CRYPTO so TLS dispatches crypto to PSA.
+component_test_tls1_2_default_cbc_legacy_cbc_etm_cipher_only_use_psa () {
+    msg "build: default with only CBC-legacy and CBC-EtM ciphers use psa"
+
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    # Disable AEAD (controlled by the presence of one of GCM_C, CCM_C, CHACHAPOLY_C)
+    scripts/config.py unset MBEDTLS_GCM_C
+    scripts/config.py unset MBEDTLS_CCM_C
+    scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+    # Disable TLS 1.3 (as no AEAD)
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    # Enable CBC-legacy (controlled by MBEDTLS_CIPHER_MODE_CBC plus at least one block cipher (AES, ARIA, Camellia, DES))
+    scripts/config.py set MBEDTLS_CIPHER_MODE_CBC
+    # Enable CBC-EtM (controlled by the same as CBC-legacy plus MBEDTLS_SSL_ENCRYPT_THEN_MAC)
+    scripts/config.py set MBEDTLS_SSL_ENCRYPT_THEN_MAC
+    # Disable stream (currently that's just the NULL pseudo-cipher (controlled by MBEDTLS_CIPHER_NULL_CIPHER))
+    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
+    # Modules that depend on AEAD
+    scripts/config.py unset MBEDTLS_SSL_CONTEXT_SERIALIZATION
+    scripts/config.py unset MBEDTLS_SSL_TICKET_C
+
+    make
+
+    msg "test: default with only CBC-legacy and CBC-EtM ciphers use psa"
+    make test
+
+    msg "test: default with only CBC-legacy and CBC-EtM ciphers use psa - ssl-opt.sh (subset)"
+    tests/ssl-opt.sh -f "TLS 1.2"
+}
+
+# We're not aware of any other (open source) implementation of EC J-PAKE in TLS
+# that we could use for interop testing. However, we now have sort of two
+# implementations ourselves: one using PSA, the other not. At least test that
+# these two interoperate with each other.
+# Interop-test the legacy and PSA implementations of EC J-PAKE against each
+# other: build server/client once without USE_PSA, keep copies of the
+# binaries, rebuild with USE_PSA, then cross-run them via ssl-opt.sh.
+component_test_tls1_2_ecjpake_compatibility() {
+    msg "build: TLS1.2 server+client w/ EC-JPAKE w/o USE_PSA"
+    scripts/config.py set MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED
+    # Explicitly make lib first to avoid a race condition:
+    # https://github.com/Mbed-TLS/mbedtls/issues/8229
+    make lib
+    make -C programs ssl/ssl_server2 ssl/ssl_client2
+    # Keep the non-PSA binaries around for the cross tests below.
+    cp programs/ssl/ssl_server2 s2_no_use_psa
+    cp programs/ssl/ssl_client2 c2_no_use_psa
+
+    msg "build: TLS1.2 server+client w/ EC-JPAKE w/ USE_PSA"
+    scripts/config.py set MBEDTLS_USE_PSA_CRYPTO
+    make clean
+    make lib
+    make -C programs ssl/ssl_server2 ssl/ssl_client2
+    make -C programs test/udp_proxy test/query_compile_time_config
+
+    # P_SRV/P_CLI override the binary that ssl-opt.sh uses for one side.
+    msg "test: server w/o USE_PSA - client w/ USE_PSA, text password"
+    P_SRV=../s2_no_use_psa tests/ssl-opt.sh -f "ECJPAKE: working, TLS"
+    msg "test: server w/o USE_PSA - client w/ USE_PSA, opaque password"
+    P_SRV=../s2_no_use_psa tests/ssl-opt.sh -f "ECJPAKE: opaque password client only, working, TLS"
+    msg "test: client w/o USE_PSA - server w/ USE_PSA, text password"
+    P_CLI=../c2_no_use_psa tests/ssl-opt.sh -f "ECJPAKE: working, TLS"
+    msg "test: client w/o USE_PSA - server w/ USE_PSA, opaque password"
+    P_CLI=../c2_no_use_psa tests/ssl-opt.sh -f "ECJPAKE: opaque password server only, working, TLS"
+
+    rm s2_no_use_psa c2_no_use_psa
+}
+
+# Build with the Everest Curve25519 ECDH implementation under ASan and run
+# the main suites, metatests, and the ECDH parts of ssl-opt.sh/compat.sh.
+component_test_everest () {
+    msg "build: Everest ECDH context (ASan build)" # ~ 6 min
+    scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: Everest ECDH context - main suites (inc. selftests) (ASan build)" # ~ 50s
+    make test
+
+    msg "test: metatests (clang, ASan)"
+    tests/scripts/run-metatests.sh any asan poison
+
+    msg "test: Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s
+    tests/ssl-opt.sh -f ECDH
+
+    msg "test: Everest ECDH context - compat.sh with some ECDH ciphersuites (ASan build)" # ~ 3 min
+    # Exclude some symmetric ciphers that are redundant here to gain time.
+    tests/compat.sh -f ECDH -V NO -e 'ARIA\|CAMELLIA\|CHACHA'
+}
+
+# Build (ASan via make flags) with the Everest ECDH implementation and
+# Curve25519 as the only enabled curve; ECDSA/ECJPAKE are disabled since
+# Curve25519 does not support them.
+component_test_everest_curve25519_only () {
+    msg "build: Everest ECDH context, only Curve25519" # ~ 6 min
+    scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_ECJPAKE_C
+    # Disable all curves
+    scripts/config.py unset-all "MBEDTLS_ECP_DP_[0-9A-Z_a-z]*_ENABLED"
+    scripts/config.py set MBEDTLS_ECP_DP_CURVE25519_ENABLED
+
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+    msg "test: Everest ECDH context, only Curve25519" # ~ 50s
+    make test
+}
+
+# ASan build with an asymmetric record size: small outgoing buffer (4096)
+# vs full incoming buffer (16384); exercised via the MFL/large-packet
+# tests of ssl-opt.sh.
+component_test_small_ssl_out_content_len () {
+    msg "build: small SSL_OUT_CONTENT_LEN (ASan build)"
+    scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 16384
+    scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 4096
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: small SSL_OUT_CONTENT_LEN - ssl-opt.sh MFL and large packet tests"
+    tests/ssl-opt.sh -f "Max fragment\|Large packet"
+}
+
+# ASan build with the opposite asymmetry: small incoming buffer (4096)
+# vs full outgoing buffer (16384); exercised via the MFL tests.
+component_test_small_ssl_in_content_len () {
+    msg "build: small SSL_IN_CONTENT_LEN (ASan build)"
+    scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 4096
+    scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 16384
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: small SSL_IN_CONTENT_LEN - ssl-opt.sh MFL tests"
+    tests/ssl-opt.sh -f "Max fragment"
+}
+
+# ASan build with a reduced DTLS handshake-buffering limit (1000 bytes),
+# targeting a specific out-of-order reassembly test in ssl-opt.sh.
+component_test_small_ssl_dtls_max_buffering () {
+    msg "build: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #0"
+    scripts/config.py set MBEDTLS_SSL_DTLS_MAX_BUFFERING 1000
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #0 - ssl-opt.sh specific reordering test"
+    tests/ssl-opt.sh -f "DTLS reordering: Buffer out-of-order hs msg before reassembling next, free buffered msg"
+}
+
+# ASan build with an even smaller DTLS handshake-buffering limit (190
+# bytes), targeting a different reordering/drop test in ssl-opt.sh.
+component_test_small_mbedtls_ssl_dtls_max_buffering () {
+    msg "build: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #1"
+    scripts/config.py set MBEDTLS_SSL_DTLS_MAX_BUFFERING 190
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: small MBEDTLS_SSL_DTLS_MAX_BUFFERING #1 - ssl-opt.sh specific reordering test"
+    tests/ssl-opt.sh -f "DTLS reordering: Buffer encrypted Finished message, drop for fragmented NewSessionTicket"
+}
+
+# Run the psa_collect_statuses.py instrumentation (full config) and check
+# that the resulting log shows psa_crypto_init() succeeding at least once.
+component_test_psa_collect_statuses () {
+    msg "build+test: psa_collect_statuses" # ~30s
+    scripts/config.py full
+    tests/scripts/psa_collect_statuses.py
+    # Check that psa_crypto_init() succeeded at least once
+    grep -q '^0:psa_crypto_init:' tests/statuses.log
+    rm -f tests/statuses.log
+}
+
+# Full config, CMake Release build with clang (C and C++ tests enabled);
+# runs main suites, C++ dummy build, metatests, demos, psa_constant_names,
+# and selected ssl-opt.sh / compat.sh subsets.
+component_test_full_cmake_clang () {
+    msg "build: cmake, full config, clang" # ~ 50s
+    scripts/config.py full
+    CC=clang CXX=clang cmake -D CMAKE_BUILD_TYPE:String=Release -D ENABLE_TESTING=On -D TEST_CPP=1 .
+    make
+
+    msg "test: main suites (full config, clang)" # ~ 5s
+    make test
+
+    msg "test: cpp_dummy_build (full config, clang)" # ~ 1s
+    programs/test/cpp_dummy_build
+
+    msg "test: metatests (clang)"
+    tests/scripts/run-metatests.sh any pthread
+
+    msg "program demos (full config, clang)" # ~10s
+    tests/scripts/run_demos.py
+
+    msg "test: psa_constant_names (full config, clang)" # ~ 1s
+    tests/scripts/test_psa_constant_names.py
+
+    msg "test: ssl-opt.sh default, ECJPAKE, SSL async (full config)" # ~ 1s
+    tests/ssl-opt.sh -f 'Default\|ECJPAKE\|SSL async private'
+
+    msg "test: compat.sh NULL (full config)" # ~ 2 min
+    tests/compat.sh -e '^$' -f 'NULL'
+
+    msg "test: compat.sh ARIA + ChachaPoly"
+    # Use the "next" OpenSSL since older ones may not support ARIA/ChaChaPoly.
+    env OPENSSL="$OPENSSL_NEXT" tests/compat.sh -e '^$' -f 'ARIA\|CHACHA'
+}
+
+# Helper: export SKIP_TEST_SUITES as a comma-separated list of every test
+# suite that contains no TEST_CF_ constant-flow annotations, so that
+# constant-flow runs only execute the suites that can benefit.
+skip_suites_without_constant_flow () {
+    # Skip the test suites that don't have any constant-flow annotations.
+    # This will need to be adjusted if we ever start declaring things as
+    # secret from macros or functions inside tests/include or tests/src.
+    SKIP_TEST_SUITES=$(
+        git -C tests/suites grep -L TEST_CF_ 'test_suite_*.function' |
+        sed 's/test_suite_//; s/\.function$//' |
+        tr '\n' ,)
+    export SKIP_TEST_SUITES
+}
+
+skip_all_except_given_suite () {
+ # Skip all but the given test suite
+ SKIP_TEST_SUITES=$(
+ ls -1 tests/suites/test_suite_*.function |
+ grep -v $1.function |
+ sed 's/tests.suites.test_suite_//; s/\.function$//' |
+ tr '\n' ,)
+ export SKIP_TEST_SUITES
+}
+
+# MemSan build (clang) of the full config minus USE_PSA_CRYPTO, with
+# constant-flow testing: MSan reports branches/memory accesses that
+# depend on values marked secret via TEST_CF_SECRET().
+component_test_memsan_constant_flow () {
+    # This tests both (1) accesses to undefined memory, and (2) branches or
+    # memory access depending on secret values. To distinguish between those:
+    # - unset MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN - does the failure persist?
+    # - or alternatively, change the build type to MemSanDbg, which enables
+    # origin tracking and nicer stack traces (which are useful for debugging
+    # anyway), and check if the origin was TEST_CF_SECRET() or something else.
+    msg "build: cmake MSan (clang), full config minus MBEDTLS_USE_PSA_CRYPTO with constant flow testing"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py unset MBEDTLS_AESNI_C # memsan doesn't grok asm
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=MemSan .
+    make
+
+    msg "test: main suites (full minus MBEDTLS_USE_PSA_CRYPTO, Msan + constant flow)"
+    make test
+}
+
+# Same as component_test_memsan_constant_flow, but with the full config
+# (USE_PSA_CRYPTO left enabled).
+component_test_memsan_constant_flow_psa () {
+    # This tests both (1) accesses to undefined memory, and (2) branches or
+    # memory access depending on secret values. To distinguish between those:
+    # - unset MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN - does the failure persist?
+    # - or alternatively, change the build type to MemSanDbg, which enables
+    # origin tracking and nicer stack traces (which are useful for debugging
+    # anyway), and check if the origin was TEST_CF_SECRET() or something else.
+    msg "build: cmake MSan (clang), full config with constant flow testing"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_MEMSAN
+    scripts/config.py unset MBEDTLS_AESNI_C # memsan doesn't grok asm
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=MemSan .
+    make
+
+    msg "test: main suites (Msan + constant flow)"
+    make test
+}
+
+# Valgrind memcheck of the full config minus USE_PSA_CRYPTO with
+# constant-flow testing (secret-dependent branches show up as use of
+# uninitialized memory), then a second pass exercising the asm path of
+# the constant-time module only.
+component_release_test_valgrind_constant_flow () {
+    # This tests both (1) everything that valgrind's memcheck usually checks
+    # (heap buffer overflows, use of uninitialized memory, use-after-free,
+    # etc.) and (2) branches or memory access depending on secret values,
+    # which will be reported as uninitialized memory. To distinguish between
+    # secret and actually uninitialized:
+    # - unset MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND - does the failure persist?
+    # - or alternatively, build with debug info and manually run the offending
+    # test suite with valgrind --track-origins=yes, then check if the origin
+    # was TEST_CF_SECRET() or something else.
+    msg "build: cmake release GCC, full config minus MBEDTLS_USE_PSA_CRYPTO with constant flow testing"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    skip_suites_without_constant_flow
+    cmake -D CMAKE_BUILD_TYPE:String=Release .
+    make
+
+    # this only shows a summary of the results (how many of each type)
+    # details are left in Testing/<date>/DynamicAnalysis.xml
+    msg "test: some suites (full minus MBEDTLS_USE_PSA_CRYPTO, valgrind + constant flow)"
+    make memcheck
+
+    # Test asm path in constant time module - by default, it will test the plain C
+    # path under Valgrind or Memsan. Running only the constant_time tests is fast (<1s)
+    msg "test: valgrind asm constant_time"
+    scripts/config.py --force set MBEDTLS_TEST_CONSTANT_FLOW_ASM
+    skip_all_except_given_suite test_suite_constant_time
+    cmake -D CMAKE_BUILD_TYPE:String=Release .
+    make clean
+    make
+    make memcheck
+}
+
+# Same as component_release_test_valgrind_constant_flow, but with the full
+# config (USE_PSA_CRYPTO left enabled) and without the asm-path pass.
+component_release_test_valgrind_constant_flow_psa () {
+    # This tests both (1) everything that valgrind's memcheck usually checks
+    # (heap buffer overflows, use of uninitialized memory, use-after-free,
+    # etc.) and (2) branches or memory access depending on secret values,
+    # which will be reported as uninitialized memory. To distinguish between
+    # secret and actually uninitialized:
+    # - unset MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND - does the failure persist?
+    # - or alternatively, build with debug info and manually run the offending
+    # test suite with valgrind --track-origins=yes, then check if the origin
+    # was TEST_CF_SECRET() or something else.
+    msg "build: cmake release GCC, full config with constant flow testing"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_TEST_CONSTANT_FLOW_VALGRIND
+    skip_suites_without_constant_flow
+    cmake -D CMAKE_BUILD_TYPE:String=Release .
+    make
+
+    # this only shows a summary of the results (how many of each type)
+    # details are left in Testing/<date>/DynamicAnalysis.xml
+    msg "test: some suites (valgrind + constant flow)"
+    make memcheck
+}
+
+# ThreadSanitizer build (clang) of the full config with pthread threading
+# enabled; runs the main suites to catch data races.
+component_test_tsan () {
+    msg "build: TSan (clang)"
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_THREADING_C
+    scripts/config.py set MBEDTLS_THREADING_PTHREAD
+    # Self-tests do not currently use multiple threads.
+    scripts/config.py unset MBEDTLS_SELF_TEST
+
+    # The deprecated MBEDTLS_PSA_CRYPTO_SE_C interface is not thread safe.
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=TSan .
+    make
+
+    msg "test: main suites (TSan)"
+    make test
+}
+
+# Default config with MBEDTLS_DEPRECATED_REMOVED: checks that removing
+# deprecated features from the default configuration stays consistent.
+component_test_default_no_deprecated () {
+    # Test that removing the deprecated features from the default
+    # configuration leaves something consistent.
+    msg "build: make, default + MBEDTLS_DEPRECATED_REMOVED" # ~ 30s
+    scripts/config.py set MBEDTLS_DEPRECATED_REMOVED
+    make CFLAGS='-O -Werror -Wall -Wextra'
+
+    msg "test: make, default + MBEDTLS_DEPRECATED_REMOVED" # ~ 5s
+    make test
+}
+
+# Build and test the "full_no_deprecated" preset, and check that X.509
+# ends up with no direct dependency on bignum symbols.
+component_test_full_no_deprecated () {
+    msg "build: make, full_no_deprecated config" # ~ 30s
+    scripts/config.py full_no_deprecated
+    make CFLAGS='-O -Werror -Wall -Wextra'
+
+    msg "test: make, full_no_deprecated config" # ~ 5s
+    make test
+
+    msg "test: ensure that X509 has no direct dependency on BIGNUM_C"
+    not grep mbedtls_mpi library/libmbedx509.a
+}
+
+# Build "full_no_deprecated" with MBEDTLS_DEPRECATED_WARNING (warnings
+# fatal via -Werror) to prove that nothing deprecated remains in it.
+component_test_full_no_deprecated_deprecated_warning () {
+    # Test that there is nothing deprecated in "full_no_deprecated".
+    # A deprecated feature would trigger a warning (made fatal) from
+    # MBEDTLS_DEPRECATED_WARNING.
+    msg "build: make, full_no_deprecated config, MBEDTLS_DEPRECATED_WARNING" # ~ 30s
+    scripts/config.py full_no_deprecated
+    scripts/config.py unset MBEDTLS_DEPRECATED_REMOVED
+    scripts/config.py set MBEDTLS_DEPRECATED_WARNING
+    make CFLAGS='-O -Werror -Wall -Wextra'
+
+    msg "test: make, full_no_deprecated config, MBEDTLS_DEPRECATED_WARNING" # ~ 5s
+    make test
+}
+
+# Full config plus MBEDTLS_DEPRECATED_WARNING: the build must pass with
+# only whitelisted warning categories downgraded from errors, then the
+# deprecated-feature tests are run.
+component_test_full_deprecated_warning () {
+    # Test that when MBEDTLS_DEPRECATED_WARNING is enabled, the build passes
+    # with only certain whitelisted types of warnings.
+    msg "build: make, full config + MBEDTLS_DEPRECATED_WARNING, expect warnings" # ~ 30s
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_DEPRECATED_WARNING
+    # Expect warnings from '#warning' directives in check_config.h.
+    # Note that gcc is required to allow the use of -Wno-error=cpp, which allows us to
+    # display #warning messages without them being treated as errors.
+    make CC=gcc CFLAGS='-O -Werror -Wall -Wextra -Wno-error=cpp' lib programs
+
+    msg "build: make tests, full config + MBEDTLS_DEPRECATED_WARNING, expect warnings" # ~ 30s
+    # Set MBEDTLS_TEST_DEPRECATED to enable tests for deprecated features.
+    # By default those are disabled when MBEDTLS_DEPRECATED_WARNING is set.
+    # Expect warnings from '#warning' directives in check_config.h and
+    # from the use of deprecated functions in test suites.
+    make CC=gcc CFLAGS='-O -Werror -Wall -Wextra -Wno-error=deprecated-declarations -Wno-error=cpp -DMBEDTLS_TEST_DEPRECATED' tests
+
+    msg "test: full config + MBEDTLS_TEST_DEPRECATED" # ~ 30s
+    make test
+
+    msg "program demos: full config + MBEDTLS_TEST_DEPRECATED" # ~10s
+    tests/scripts/run_demos.py
+}
+
+# Check that the specified libraries exist and are empty (export no symbols).
+# The first nm call verifies that the files exist and are readable archives;
+# the second succeeds only if nothing remains after filtering out nm's
+# "file:" header lines (which end in ':').
+are_empty_libraries () {
+    nm "$@" >/dev/null 2>/dev/null
+    ! nm "$@" 2>/dev/null | grep -v ':$' | grep .
+}
+
+# Build the crypto-only preset and verify that the X.509 and TLS
+# libraries come out empty.
+component_build_crypto_default () {
+    msg "build: make, crypto only"
+    scripts/config.py crypto
+    make CFLAGS='-O1 -Werror'
+    are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+# Build the crypto_full preset and verify that the X.509 and TLS
+# libraries come out empty.
+component_build_crypto_full () {
+    msg "build: make, crypto only, full config"
+    scripts/config.py crypto_full
+    make CFLAGS='-O1 -Werror'
+    are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+# Build and test a configuration typical of a standalone PSA crypto
+# service: crypto preset plus key-owner encoding, with everything not
+# needed for pure cryptography stripped out.
+component_test_crypto_for_psa_service () {
+    msg "build: make, config for PSA crypto service"
+    scripts/config.py crypto
+    scripts/config.py set MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
+    # Disable things that are not needed for just cryptography, to
+    # reach a configuration that would be typical for a PSA cryptography
+    # service providing all implemented PSA algorithms.
+    # System stuff
+    scripts/config.py unset MBEDTLS_ERROR_C
+    scripts/config.py unset MBEDTLS_TIMING_C
+    scripts/config.py unset MBEDTLS_VERSION_FEATURES
+    # Crypto stuff with no PSA interface
+    scripts/config.py unset MBEDTLS_BASE64_C
+    # Keep MBEDTLS_CIPHER_C because psa_crypto_cipher, CCM and GCM need it.
+    scripts/config.py unset MBEDTLS_HKDF_C # PSA's HKDF is independent
+    # Keep MBEDTLS_MD_C because deterministic ECDSA needs it for HMAC_DRBG.
+    scripts/config.py unset MBEDTLS_NIST_KW_C
+    scripts/config.py unset MBEDTLS_PEM_PARSE_C
+    scripts/config.py unset MBEDTLS_PEM_WRITE_C
+    scripts/config.py unset MBEDTLS_PKCS12_C
+    scripts/config.py unset MBEDTLS_PKCS5_C
+    # MBEDTLS_PK_PARSE_C and MBEDTLS_PK_WRITE_C are actually currently needed
+    # in PSA code to work with RSA keys. We don't require users to set those:
+    # they will be reenabled in build_info.h.
+    scripts/config.py unset MBEDTLS_PK_C
+    scripts/config.py unset MBEDTLS_PK_PARSE_C
+    scripts/config.py unset MBEDTLS_PK_WRITE_C
+    make CFLAGS='-O1 -Werror' all test
+    are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+
+# Build the crypto_baremetal preset (no OS dependencies; uses the
+# baremetal-override headers) and verify X.509/TLS libraries are empty.
+component_build_crypto_baremetal () {
+    msg "build: make, crypto only, baremetal config"
+    scripts/config.py crypto_baremetal
+    make CFLAGS="-O1 -Werror -I$PWD/tests/include/baremetal-override/"
+    are_empty_libraries library/libmbedx509.* library/libmbedtls.*
+}
+# Platform check for component_build_crypto_baremetal: same prerequisite
+# as the plain baremetal build.
+support_build_crypto_baremetal () {
+    support_build_baremetal "$@"
+}
+
+# Build the baremetal preset (no OS dependencies; uses the
+# baremetal-override headers to ban time.h etc.).
+component_build_baremetal () {
+    msg "build: make, baremetal config"
+    scripts/config.py baremetal
+    make CFLAGS="-O1 -Werror -I$PWD/tests/include/baremetal-override/"
+}
+# Platform check for the baremetal builds: only run where system headers
+# do not drag in time.h behind our back.
+support_build_baremetal () {
+    # Older Glibc versions include time.h from other headers such as stdlib.h,
+    # which makes the no-time.h-in-baremetal check fail. Ubuntu 16.04 has this
+    # problem, Ubuntu 18.04 is ok.
+    ! grep -q -F time.h /usr/include/x86_64-linux-gnu/sys/types.h
+}
+
+# depends.py family of tests
+# depends.py domain "cipher_id", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_cipher_id () {
+    msg "test/build: depends.py cipher_id (gcc)"
+    tests/scripts/depends.py cipher_id --unset-use-psa
+}
+
+# depends.py domain "cipher_chaining", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_cipher_chaining () {
+    msg "test/build: depends.py cipher_chaining (gcc)"
+    tests/scripts/depends.py cipher_chaining --unset-use-psa
+}
+
+# depends.py domain "cipher_padding", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_cipher_padding () {
+    msg "test/build: depends.py cipher_padding (gcc)"
+    tests/scripts/depends.py cipher_padding --unset-use-psa
+}
+
+# depends.py domain "curves", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_curves () {
+    msg "test/build: depends.py curves (gcc)"
+    tests/scripts/depends.py curves --unset-use-psa
+}
+
+# depends.py domain "hashes", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_hashes () {
+    msg "test/build: depends.py hashes (gcc)"
+    tests/scripts/depends.py hashes --unset-use-psa
+}
+
+# depends.py domain "kex" (key exchanges), legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_kex () {
+    msg "test/build: depends.py kex (gcc)"
+    tests/scripts/depends.py kex --unset-use-psa
+}
+
+# depends.py domain "pkalgs", legacy dispatch (USE_PSA_CRYPTO unset).
+component_test_depends_py_pkalgs () {
+    msg "test/build: depends.py pkalgs (gcc)"
+    tests/scripts/depends.py pkalgs --unset-use-psa
+}
+
+# PSA equivalents of the depends.py tests
+# depends.py domain "cipher_id" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_cipher_id_psa () {
+    msg "test/build: depends.py cipher_id (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py cipher_id
+}
+
+# depends.py domain "cipher_chaining" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_cipher_chaining_psa () {
+    msg "test/build: depends.py cipher_chaining (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py cipher_chaining
+}
+
+# depends.py domain "cipher_padding" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_cipher_padding_psa () {
+    msg "test/build: depends.py cipher_padding (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py cipher_padding
+}
+
+# depends.py domain "curves" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_curves_psa () {
+    msg "test/build: depends.py curves (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py curves
+}
+
+# depends.py domain "hashes" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_hashes_psa () {
+    msg "test/build: depends.py hashes (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py hashes
+}
+
+# depends.py domain "kex" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_kex_psa () {
+    msg "test/build: depends.py kex (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py kex
+}
+
+# depends.py domain "pkalgs" with MBEDTLS_USE_PSA_CRYPTO left enabled.
+component_test_depends_py_pkalgs_psa () {
+    msg "test/build: depends.py pkalgs (gcc) with MBEDTLS_USE_PSA_CRYPTO defined"
+    tests/scripts/depends.py pkalgs
+}
+
+# Full config with only the 2048-bit RFC 7919 DH group enabled (ASan via
+# make flags); runs the main suites and the ffdh subset of ssl-opt.sh.
+component_test_psa_crypto_config_ffdh_2048_only () {
+    msg "build: full config - only DH 2048"
+
+    scripts/config.py full
+
+    # Disable all DH groups other than 2048.
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_DH_RFC7919_3072
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_DH_RFC7919_4096
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_DH_RFC7919_6144
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_DH_RFC7919_8192
+
+    make CFLAGS="$ASAN_CFLAGS -Werror" LDFLAGS="$ASAN_CFLAGS"
+
+    msg "test: full config - only DH 2048"
+    make test
+
+    msg "ssl-opt: full config - only DH 2048"
+    tests/ssl-opt.sh -f "ffdh"
+}
+
+# Compile-only check of the full config with MBEDTLS_PK_RSA_ALT_SUPPORT
+# disabled (RSA and X.509 cert writing kept enabled).
+component_build_no_pk_rsa_alt_support () {
+    msg "build: !MBEDTLS_PK_RSA_ALT_SUPPORT" # ~30s
+
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_PK_RSA_ALT_SUPPORT
+    scripts/config.py set MBEDTLS_RSA_C
+    scripts/config.py set MBEDTLS_X509_CRT_WRITE_C
+
+    # Only compile - this is primarily to test for compile issues
+    make CFLAGS='-Werror -Wall -Wextra -I../tests/include/alt-dummy'
+}
+
+# Compile-only check with every whole-module MBEDTLS_XXX_ALT option
+# enabled (using the dummy alt headers), after disabling the options that
+# are incompatible with alternative implementations.
+component_build_module_alt () {
+    msg "build: MBEDTLS_XXX_ALT" # ~30s
+    scripts/config.py full
+
+    # Disable options that are incompatible with some ALT implementations:
+    # aesni.c and padlock.c reference mbedtls_aes_context fields directly.
+    scripts/config.py unset MBEDTLS_AESNI_C
+    scripts/config.py unset MBEDTLS_PADLOCK_C
+    scripts/config.py unset MBEDTLS_AESCE_C
+    # MBEDTLS_ECP_RESTARTABLE is documented as incompatible.
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+    # You can only have one threading implementation: alt or pthread, not both.
+    scripts/config.py unset MBEDTLS_THREADING_PTHREAD
+    # The SpecifiedECDomain parsing code accesses mbedtls_ecp_group fields
+    # directly and assumes the implementation works with partial groups.
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_EXTENDED
+    # MBEDTLS_SHA256_*ALT can't be used with MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_*
+    scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+    scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY
+    # MBEDTLS_SHA512_*ALT can't be used with MBEDTLS_SHA512_USE_A64_CRYPTO_*
+    scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT
+    scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY
+
+    # Enable all MBEDTLS_XXX_ALT for whole modules. Do not enable
+    # MBEDTLS_XXX_YYY_ALT which are for single functions.
+    scripts/config.py set-all 'MBEDTLS_([A-Z0-9]*|NIST_KW)_ALT'
+    scripts/config.py unset MBEDTLS_DHM_ALT #incompatible with MBEDTLS_DEBUG_C
+
+    # We can only compile, not link, since we don't have any implementations
+    # suitable for testing with the dummy alt headers.
+    make CFLAGS='-Werror -Wall -Wextra -I../tests/include/alt-dummy' lib
+}
+
+# Compile-only check for MBEDTLS_DHM_ALT on its own (it is excluded from
+# component_build_module_alt because it conflicts with MBEDTLS_DEBUG_C).
+component_build_dhm_alt () {
+    msg "build: MBEDTLS_DHM_ALT" # ~30s
+    scripts/config.py full
+    scripts/config.py set MBEDTLS_DHM_ALT
+    # debug.c currently references mbedtls_dhm_context fields directly.
+    scripts/config.py unset MBEDTLS_DEBUG_C
+    # We can only compile, not link, since we don't have any implementations
+    # suitable for testing with the dummy alt headers.
+    make CFLAGS='-Werror -Wall -Wextra -I../tests/include/alt-dummy' lib
+}
+
+# Full config with the PSA crypto subsystem disabled, built with ASan via
+# CMake; runs the unit suites, ssl-opt.sh and several compat.sh passes.
+# The options unset after PSA_CRYPTO_C appear to be those that cannot work
+# without it (TLS 1.3, ITS, SE, storage, LMS) — confirm against build_info.h.
+component_test_no_psa_crypto_full_cmake_asan() {
+    # full minus MBEDTLS_PSA_CRYPTO_C: run the same set of tests as basic-build-test.sh
+    msg "build: cmake, full config minus PSA crypto, ASan"
+    scripts/config.py full
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_C
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_CLIENT
+    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
+    scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
+    scripts/config.py unset MBEDTLS_LMS_C
+    scripts/config.py unset MBEDTLS_LMS_PRIVATE
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (full minus PSA crypto)"
+    make test
+
+    # Note: ssl-opt.sh has some test cases that depend on
+    # MBEDTLS_ECP_RESTARTABLE && !MBEDTLS_USE_PSA_CRYPTO
+    # This is the only component where those tests are not skipped.
+    msg "test: ssl-opt.sh (full minus PSA crypto)"
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh default (full minus PSA crypto)"
+    tests/compat.sh
+
+    msg "test: compat.sh NULL (full minus PSA crypto)"
+    tests/compat.sh -f 'NULL'
+
+    # ARIA/ChachaPoly need a newer peer openssl, hence OPENSSL_NEXT.
+    msg "test: compat.sh ARIA + ChachaPoly (full minus PSA crypto)"
+    env OPENSSL="$OPENSSL_NEXT" tests/compat.sh -e '^$' -f 'ARIA\|CHACHA'
+}
+
+# Accelerate ECDSA (plus the ECC key types and curves) in libtestdriver1,
+# disable the built-in ECDSA module and verify the driver-only build passes
+# the unit suites and that the built-in symbols did not sneak back in.
+component_test_psa_crypto_config_accel_ecdsa () {
+    msg "build: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated ECDSA"
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    # Start from default config (no USE_PSA) + TLS 1.3
+    helper_libtestdriver1_adjust_config "default"
+
+    # Disable the module that's accelerated
+    scripts/config.py unset MBEDTLS_ECDSA_C
+
+    # Disable things that depend on it
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+
+    # Build
+    # -----
+
+    # These hashes are needed for some ECDSA signature tests.
+    loc_extra_list="ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure this was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdsa_ library/ecdsa.o
+
+    # Run the tests
+    # -------------
+
+    msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated ECDSA"
+    make test
+}
+
+# Accelerate ECDH (plus ECC key types and curves), disable built-in ECDH and
+# every key exchange that needs it, then run the unit suites on the
+# driver-only build.
+component_test_psa_crypto_config_accel_ecdh () {
+    msg "build: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated ECDH"
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_ECDH \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    # Start from default config (no USE_PSA)
+    helper_libtestdriver1_adjust_config "default"
+
+    # Disable the module that's accelerated
+    scripts/config.py unset MBEDTLS_ECDH_C
+
+    # Disable things that depend on it
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED
+
+    # Build
+    # -----
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure this was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdh_ library/ecdh.o
+
+    # Run the tests
+    # -------------
+
+    msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated ECDH"
+    make test
+}
+
+# Accelerate FFDH (plus DH key types and groups), disable built-in DHM and
+# the DHE key exchanges, then run the unit suites and the FFDH subset of
+# ssl-opt.sh on the driver-only build.
+component_test_psa_crypto_config_accel_ffdh () {
+    msg "build: full with accelerated FFDH"
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_FFDH \
+                    $(helper_get_psa_key_type_list "DH") \
+                    $(helper_get_psa_dh_group_list)"
+
+    # Configure
+    # ---------
+
+    # start with full (USE_PSA and TLS 1.3)
+    helper_libtestdriver1_adjust_config "full"
+
+    # Disable the module that's accelerated
+    scripts/config.py unset MBEDTLS_DHM_C
+
+    # Disable things that depend on it
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+
+    # Build
+    # -----
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure this was not re-enabled by accident (additive config)
+    not grep mbedtls_dhm_ library/dhm.o
+
+    # Run the tests
+    # -------------
+
+    msg "test: full with accelerated FFDH"
+    make test
+
+    msg "ssl-opt: full with accelerated FFDH alg"
+    tests/ssl-opt.sh -f "ffdh"
+}
+
+# Reference (built-in, non-accelerated) counterpart of
+# component_test_psa_crypto_config_accel_ffdh, for outcome comparison.
+# The DHE key exchanges are disabled here too so both builds test the same
+# feature set.
+component_test_psa_crypto_config_reference_ffdh () {
+    msg "build: full with non-accelerated FFDH"
+
+    # Start with full (USE_PSA and TLS 1.3)
+    helper_libtestdriver1_adjust_config "full"
+
+    # Disable things that are not supported
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+    make
+
+    msg "test suites: full with non-accelerated FFDH alg"
+    make test
+
+    msg "ssl-opt: full with non-accelerated FFDH alg"
+    tests/ssl-opt.sh -f "ffdh"
+}
+
+# Accelerate J-PAKE (plus ECC key types and curves), remove the built-in
+# EC J-PAKE fallback, then run the unit suites on the driver-only build.
+component_test_psa_crypto_config_accel_pake() {
+    msg "build: full with accelerated PAKE"
+
+    loc_accel_list="ALG_JPAKE \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    helper_libtestdriver1_adjust_config "full"
+
+    # Make built-in fallback not available
+    scripts/config.py unset MBEDTLS_ECJPAKE_C
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED
+
+    # Build
+    # -----
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure this was not re-enabled by accident (additive config)
+    not grep mbedtls_ecjpake_init library/ecjpake.o
+
+    # Run the tests
+    # -------------
+
+    msg "test: full with accelerated PAKE"
+    make test
+}
+
+# Accelerate the EC algorithms and a subset of ECC key-type options
+# (BASIC/IMPORT/EXPORT but not GENERATE or DERIVE), so some built-in support
+# is expected to be re-enabled automatically: ECP must come back, the other
+# EC modules must not.
+component_test_psa_crypto_config_accel_ecc_some_key_types () {
+    msg "build: full with accelerated EC algs and some key types"
+
+    # Algorithms and key types to accelerate
+    # For key types, use an explicitly list to omit GENERATE (and DERIVE)
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    ALG_ECDH \
+                    ALG_JPAKE \
+                    KEY_TYPE_ECC_PUBLIC_KEY \
+                    KEY_TYPE_ECC_KEY_PAIR_BASIC \
+                    KEY_TYPE_ECC_KEY_PAIR_IMPORT \
+                    KEY_TYPE_ECC_KEY_PAIR_EXPORT \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    # start with config full for maximum coverage (also enables USE_PSA)
+    helper_libtestdriver1_adjust_config "full"
+
+    # Disable modules that are accelerated - some will be re-enabled
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_ECDH_C
+    scripts/config.py unset MBEDTLS_ECJPAKE_C
+    scripts/config.py unset MBEDTLS_ECP_C
+
+    # Disable all curves - those that aren't accelerated should be re-enabled
+    helper_disable_builtin_curves
+
+    # Restartable feature is not yet supported by PSA. Once it will in
+    # the future, the following line could be removed (see issues
+    # 6061, 6332 and following ones)
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+
+    # this is not supported by the driver API yet
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE
+
+    # Build
+    # -----
+
+    # These hashes are needed for some ECDSA signature tests.
+    loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # ECP should be re-enabled but not the others
+    not grep mbedtls_ecdh_ library/ecdh.o
+    not grep mbedtls_ecdsa library/ecdsa.o
+    not grep mbedtls_ecjpake library/ecjpake.o
+    grep mbedtls_ecp library/ecp.o
+
+    # Run the tests
+    # -------------
+
+    msg "test suites: full with accelerated EC algs and some key types"
+    make test
+}
+
+# Run tests with only (non-)Weierstrass accelerated
+# Common code used in:
+# - component_test_psa_crypto_config_accel_ecc_weierstrass_curves
+# - component_test_psa_crypto_config_accel_ecc_non_weierstrass_curves
+#
+# $1: 1 to accelerate the Weierstrass curve families (Brainpool/SECP),
+#     0 to accelerate the remaining families instead.
+common_test_psa_crypto_config_accel_ecc_some_curves () {
+    weierstrass=$1
+    # Quote the expansion: with an empty/missing argument, an unquoted
+    # `[ -eq 1 ]` is a [ syntax error rather than a clear failure.
+    if [ "$weierstrass" -eq 1 ]; then
+        desc="Weierstrass"
+    else
+        desc="non-Weierstrass"
+    fi
+
+    msg "build: crypto_full minus PK with accelerated EC algs and $desc curves"
+
+    # Note: Curves are handled in a special way by the libtestdriver machinery,
+    # so we only want to include them in the accel list when building the main
+    # libraries, hence the use of a separate variable.
+    # Note: the following loop is a modified version of
+    # helper_get_psa_curve_list that only keeps Weierstrass families.
+    loc_weierstrass_list=""
+    loc_non_weierstrass_list=""
+    for item in $(sed -n 's/^#define PSA_WANT_\(ECC_[0-9A-Z_a-z]*\).*/\1/p' <"$CRYPTO_CONFIG_H"); do
+        case $item in
+            ECC_BRAINPOOL*|ECC_SECP*)
+                loc_weierstrass_list="$loc_weierstrass_list $item"
+                ;;
+            *)
+                loc_non_weierstrass_list="$loc_non_weierstrass_list $item"
+                ;;
+        esac
+    done
+    if [ "$weierstrass" -eq 1 ]; then
+        loc_curve_list=$loc_weierstrass_list
+    else
+        loc_curve_list=$loc_non_weierstrass_list
+    fi
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    ALG_ECDH \
+                    ALG_JPAKE \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $loc_curve_list"
+
+    # Configure
+    # ---------
+
+    # Start with config crypto_full and remove PK_C:
+    # that's what's supported now, see docs/driver-only-builds.md.
+    helper_libtestdriver1_adjust_config "crypto_full"
+    scripts/config.py unset MBEDTLS_PK_C
+    scripts/config.py unset MBEDTLS_PK_PARSE_C
+    scripts/config.py unset MBEDTLS_PK_WRITE_C
+
+    # Disable modules that are accelerated - some will be re-enabled
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_ECDH_C
+    scripts/config.py unset MBEDTLS_ECJPAKE_C
+    scripts/config.py unset MBEDTLS_ECP_C
+
+    # Disable all curves - those that aren't accelerated should be re-enabled
+    helper_disable_builtin_curves
+
+    # Restartable feature is not yet supported by PSA. Once it will in
+    # the future, the following line could be removed (see issues
+    # 6061, 6332 and following ones)
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+
+    # this is not supported by the driver API yet
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE
+
+    # Build
+    # -----
+
+    # These hashes are needed for some ECDSA signature tests.
+    loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # We expect ECDH to be re-enabled for the missing curves
+    grep mbedtls_ecdh_ library/ecdh.o
+    # We expect ECP to be re-enabled, however the parts specific to the
+    # families of curves that are accelerated should be omitted.
+    # - functions with mxz in the name are specific to Montgomery curves
+    # - ecp_muladd is specific to Weierstrass curves
+    if [ "$weierstrass" -eq 1 ]; then
+        not grep mbedtls_ecp_muladd library/ecp.o
+        grep mxz library/ecp.o
+    else
+        grep mbedtls_ecp_muladd library/ecp.o
+        not grep mxz library/ecp.o
+    fi
+    # We expect ECDSA and ECJPAKE to be re-enabled only when
+    # Weierstrass curves are not accelerated
+    if [ "$weierstrass" -eq 1 ]; then
+        not grep mbedtls_ecdsa library/ecdsa.o
+        not grep mbedtls_ecjpake library/ecjpake.o
+    else
+        grep mbedtls_ecdsa library/ecdsa.o
+        grep mbedtls_ecjpake library/ecjpake.o
+    fi
+
+    # Run the tests
+    # -------------
+
+    msg "test suites: crypto_full minus PK with accelerated EC algs and $desc curves"
+    make test
+}
+
+# Weierstrass-accelerated variant of the common helper above.
+component_test_psa_crypto_config_accel_ecc_weierstrass_curves () {
+    common_test_psa_crypto_config_accel_ecc_some_curves 1
+}
+
+# Non-Weierstrass-accelerated variant of the common helper above.
+component_test_psa_crypto_config_accel_ecc_non_weierstrass_curves () {
+    common_test_psa_crypto_config_accel_ecc_some_curves 0
+}
+
+# Auxiliary function to build config for all EC based algorithms (EC-JPAKE,
+# ECDH, ECDSA) with and without drivers.
+# The input parameter is a boolean value which indicates:
+# - 0 keep built-in EC algs,
+# - 1 exclude built-in EC algs (driver only).
+#
+# This is used by the two following components to ensure they always use the
+# same config, except for the use of driver or built-in EC algorithms:
+# - component_test_psa_crypto_config_accel_ecc_ecp_light_only;
+# - component_test_psa_crypto_config_reference_ecc_ecp_light_only.
+# This supports comparing their test coverage with analyze_outcomes.py.
+config_psa_crypto_config_ecp_light_only () {
+    driver_only="$1"
+    # start with config full for maximum coverage (also enables USE_PSA)
+    helper_libtestdriver1_adjust_config "full"
+    if [ "$driver_only" -eq 1 ]; then
+        # Disable modules that are accelerated
+        scripts/config.py unset MBEDTLS_ECDSA_C
+        scripts/config.py unset MBEDTLS_ECDH_C
+        scripts/config.py unset MBEDTLS_ECJPAKE_C
+        scripts/config.py unset MBEDTLS_ECP_C
+    fi
+
+    # Restartable feature is not yet supported by PSA. Once it will in
+    # the future, the following line could be removed (see issues
+    # 6061, 6332 and following ones)
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+}
+
+# Keep in sync with component_test_psa_crypto_config_reference_ecc_ecp_light_only
+#
+# Driver-only EC build on top of config_psa_crypto_config_ecp_light_only 1;
+# built-in curves stay enabled (needed for extended/compressed EC parsing).
+# Verifies no built-in EC symbol came back, then runs suites and ssl-opt.sh.
+component_test_psa_crypto_config_accel_ecc_ecp_light_only () {
+    msg "build: full with accelerated EC algs"
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    ALG_ECDH \
+                    ALG_JPAKE \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    # Use the same config as reference, only without built-in EC algs
+    config_psa_crypto_config_ecp_light_only 1
+
+    # Do not disable builtin curves because that support is required for:
+    # - MBEDTLS_PK_PARSE_EC_EXTENDED
+    # - MBEDTLS_PK_PARSE_EC_COMPRESSED
+
+    # Build
+    # -----
+
+    # These hashes are needed for some ECDSA signature tests.
+    loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure any built-in EC alg was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdsa_ library/ecdsa.o
+    not grep mbedtls_ecdh_ library/ecdh.o
+    not grep mbedtls_ecjpake_ library/ecjpake.o
+    not grep mbedtls_ecp_mul library/ecp.o
+
+    # Run the tests
+    # -------------
+
+    msg "test suites: full with accelerated EC algs"
+    make test
+
+    msg "ssl-opt: full with accelerated EC algs"
+    tests/ssl-opt.sh
+}
+
+# Keep in sync with component_test_psa_crypto_config_accel_ecc_ecp_light_only
+#
+# Reference (built-in EC) counterpart of the accelerated component above,
+# for outcome comparison with analyze_outcomes.py.
+component_test_psa_crypto_config_reference_ecc_ecp_light_only () {
+    msg "build: MBEDTLS_PSA_CRYPTO_CONFIG with non-accelerated EC algs"
+
+    config_psa_crypto_config_ecp_light_only 0
+
+    make
+
+    msg "test suites: full with non-accelerated EC algs"
+    make test
+
+    msg "ssl-opt: full with non-accelerated EC algs"
+    tests/ssl-opt.sh
+}
+
+# This helper function is used by:
+# - component_test_psa_crypto_config_accel_ecc_no_ecp_at_all()
+# - component_test_psa_crypto_config_reference_ecc_no_ecp_at_all()
+# to ensure that both tests use the same underlying configuration when testing
+# driver's coverage with analyze_outcomes.py.
+#
+# This functions accepts 1 boolean parameter as follows:
+# - 1: building with accelerated EC algorithms (ECDSA, ECDH, ECJPAKE), therefore
+#      excluding their built-in implementation as well as ECP_C & ECP_LIGHT
+# - 0: include built-in implementation of EC algorithms.
+#
+# PK_C and RSA_C are always disabled to ensure there is no remaining dependency
+# on the ECP module.
+config_psa_crypto_no_ecp_at_all () {
+    driver_only="$1"
+    # start with full config for maximum coverage (also enables USE_PSA)
+    helper_libtestdriver1_adjust_config "full"
+
+    if [ "$driver_only" -eq 1 ]; then
+        # Disable modules that are accelerated
+        scripts/config.py unset MBEDTLS_ECDSA_C
+        scripts/config.py unset MBEDTLS_ECDH_C
+        scripts/config.py unset MBEDTLS_ECJPAKE_C
+        # Disable ECP module (entirely)
+        scripts/config.py unset MBEDTLS_ECP_C
+    fi
+
+    # Disable all the features that auto-enable ECP_LIGHT (see build_info.h)
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_EXTENDED
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_COMPRESSED
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE
+
+    # Restartable feature is not yet supported by PSA. Once it will in
+    # the future, the following line could be removed (see issues
+    # 6061, 6332 and following ones)
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+}
+
+# Build and test a configuration where driver accelerates all EC algs while
+# all support and dependencies from ECP and ECP_LIGHT are removed on the library
+# side.
+#
+# Keep in sync with component_test_psa_crypto_config_reference_ecc_no_ecp_at_all()
+component_test_psa_crypto_config_accel_ecc_no_ecp_at_all () {
+    msg "build: full + accelerated EC algs - ECP"
+
+    # Algorithms and key types to accelerate
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    ALG_ECDH \
+                    ALG_JPAKE \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+
+    # Configure
+    # ---------
+
+    # Set common configurations between library's and driver's builds
+    config_psa_crypto_no_ecp_at_all 1
+    # Disable all the builtin curves. All the required algs are accelerated.
+    helper_disable_builtin_curves
+
+    # Build
+    # -----
+
+    # Things we wanted supported in libtestdriver1, but not accelerated in the main library:
+    # SHA-1 and all SHA-2/3 variants, as they are used by ECDSA deterministic.
+    loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure any built-in EC alg was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdsa_ library/ecdsa.o
+    not grep mbedtls_ecdh_ library/ecdh.o
+    not grep mbedtls_ecjpake_ library/ecjpake.o
+    # Also ensure that ECP module was not re-enabled
+    not grep mbedtls_ecp_ library/ecp.o
+
+    # Run the tests
+    # -------------
+
+    msg "test: full + accelerated EC algs - ECP"
+    make test
+
+    msg "ssl-opt: full + accelerated EC algs - ECP"
+    tests/ssl-opt.sh
+}
+
+# Reference function used for driver's coverage analysis in analyze_outcomes.py
+# in conjunction with component_test_psa_crypto_config_accel_ecc_no_ecp_at_all().
+# Keep in sync with its accelerated counterpart.
+component_test_psa_crypto_config_reference_ecc_no_ecp_at_all () {
+    msg "build: full + non accelerated EC algs"
+
+    config_psa_crypto_no_ecp_at_all 0
+
+    make
+
+    msg "test: full + non accelerated EC algs"
+    make test
+
+    msg "ssl-opt: full + non accelerated EC algs"
+    tests/ssl-opt.sh
+}
+
+# This is a common configuration helper used directly from:
+# - common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum
+# - common_test_psa_crypto_config_reference_ecc_ffdh_no_bignum
+# and indirectly from:
+# - component_test_psa_crypto_config_accel_ecc_no_bignum
+#       - accelerate all EC algs, disable RSA and FFDH
+# - component_test_psa_crypto_config_reference_ecc_no_bignum
+#       - this is the reference component of the above
+#       - it still disables RSA and FFDH, but it uses builtin EC algs
+# - component_test_psa_crypto_config_accel_ecc_ffdh_no_bignum
+#       - accelerate all EC and FFDH algs, disable only RSA
+# - component_test_psa_crypto_config_reference_ecc_ffdh_no_bignum
+#       - this is the reference component of the above
+#       - it still disables RSA, but it uses builtin EC and FFDH algs
+#
+# This function accepts 2 parameters:
+# $1: a boolean value which states if we are testing an accelerated scenario
+#     or not.
+# $2: a string value which states which components are tested. Allowed values
+#     are "ECC" or "ECC_DH".
+config_psa_crypto_config_accel_ecc_ffdh_no_bignum() {
+    driver_only="$1"
+    test_target="$2"
+    # start with full config for maximum coverage (also enables USE_PSA)
+    helper_libtestdriver1_adjust_config "full"
+
+    if [ "$driver_only" -eq 1 ]; then
+        # Disable modules that are accelerated
+        scripts/config.py unset MBEDTLS_ECDSA_C
+        scripts/config.py unset MBEDTLS_ECDH_C
+        scripts/config.py unset MBEDTLS_ECJPAKE_C
+        # Disable ECP module (entirely)
+        scripts/config.py unset MBEDTLS_ECP_C
+        # Also disable bignum
+        scripts/config.py unset MBEDTLS_BIGNUM_C
+    fi
+
+    # Disable all the features that auto-enable ECP_LIGHT (see build_info.h)
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_EXTENDED
+    scripts/config.py unset MBEDTLS_PK_PARSE_EC_COMPRESSED
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_ECC_KEY_PAIR_DERIVE
+
+    # RSA support is intentionally disabled on this test because RSA_C depends
+    # on BIGNUM_C.
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset-all "PSA_WANT_KEY_TYPE_RSA_[0-9A-Z_a-z]*"
+    scripts/config.py -f "$CRYPTO_CONFIG_H" unset-all "PSA_WANT_ALG_RSA_[0-9A-Z_a-z]*"
+    scripts/config.py unset MBEDTLS_RSA_C
+    scripts/config.py unset MBEDTLS_PKCS1_V15
+    scripts/config.py unset MBEDTLS_PKCS1_V21
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    # Also disable key exchanges that depend on RSA
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+    scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+
+    if [ "$test_target" = "ECC" ]; then
+        # When testing ECC only, we disable FFDH support, both from builtin and
+        # PSA sides, and also disable the key exchanges that depend on DHM.
+        # Use $CRYPTO_CONFIG_H like every other line here instead of
+        # hard-coding the include/psa/crypto_config.h path.
+        scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_FFDH
+        scripts/config.py -f "$CRYPTO_CONFIG_H" unset-all "PSA_WANT_KEY_TYPE_DH_[0-9A-Z_a-z]*"
+        scripts/config.py -f "$CRYPTO_CONFIG_H" unset-all "PSA_WANT_DH_RFC7919_[0-9]*"
+        scripts/config.py unset MBEDTLS_DHM_C
+        scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED
+        scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+    else
+        # When testing ECC and DH instead, we disable DHM and depending key
+        # exchanges only in the accelerated build
+        if [ "$driver_only" -eq 1 ]; then
+            scripts/config.py unset MBEDTLS_DHM_C
+            scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED
+            scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+        fi
+    fi
+
+    # Restartable feature is not yet supported by PSA. Once it will in
+    # the future, the following line could be removed (see issues
+    # 6061, 6332 and following ones)
+    scripts/config.py unset MBEDTLS_ECP_RESTARTABLE
+}
+
+# Common helper used by:
+# - component_test_psa_crypto_config_accel_ecc_no_bignum
+# - component_test_psa_crypto_config_accel_ecc_ffdh_no_bignum
+#
+# The goal is to build and test accelerating either:
+# - ECC only or
+# - both ECC and FFDH
+#
+# It is meant to be used in conjunction with
+# common_test_psa_crypto_config_reference_ecc_ffdh_no_bignum() for drivers
+# coverage analysis in the "analyze_outcomes.py" script.
+#
+# $1: test target, "ECC" or "ECC_DH" (forwarded to the config helper).
+common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum () {
+    test_target="$1"
+
+    # This is an internal helper to simplify text message handling
+    if [ "$test_target" = "ECC_DH" ]; then
+        accel_text="ECC/FFDH"
+        removed_text="ECP - DH"
+    else
+        accel_text="ECC"
+        removed_text="ECP"
+    fi
+
+    msg "build: full + accelerated $accel_text algs + USE_PSA - $removed_text - BIGNUM"
+
+    # By default we accelerate all EC keys/algs
+    loc_accel_list="ALG_ECDSA ALG_DETERMINISTIC_ECDSA \
+                    ALG_ECDH \
+                    ALG_JPAKE \
+                    $(helper_get_psa_key_type_list "ECC") \
+                    $(helper_get_psa_curve_list)"
+    # Optionally we can also add DH to the list of accelerated items
+    if [ "$test_target" = "ECC_DH" ]; then
+        loc_accel_list="$loc_accel_list \
+                        ALG_FFDH \
+                        $(helper_get_psa_key_type_list "DH") \
+                        $(helper_get_psa_dh_group_list)"
+    fi
+
+    # Configure
+    # ---------
+
+    # Set common configurations between library's and driver's builds
+    config_psa_crypto_config_accel_ecc_ffdh_no_bignum 1 "$test_target"
+    # Disable all the builtin curves. All the required algs are accelerated.
+    helper_disable_builtin_curves
+
+    # Build
+    # -----
+
+    # Things we wanted supported in libtestdriver1, but not accelerated in the main library:
+    # SHA-1 and all SHA-2/3 variants, as they are used by ECDSA deterministic.
+    loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+                    ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+    helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+    helper_libtestdriver1_make_main "$loc_accel_list"
+
+    # Make sure any built-in EC alg was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdsa_ library/ecdsa.o
+    not grep mbedtls_ecdh_ library/ecdh.o
+    not grep mbedtls_ecjpake_ library/ecjpake.o
+    # Also ensure that ECP, RSA, [DHM] or BIGNUM modules were not re-enabled
+    not grep mbedtls_ecp_ library/ecp.o
+    not grep mbedtls_rsa_ library/rsa.o
+    not grep mbedtls_mpi_ library/bignum.o
+    not grep mbedtls_dhm_ library/dhm.o
+
+    # Run the tests
+    # -------------
+
+    # NOTE(review): this msg adds "- DHM" unlike the matching build/ssl-opt
+    # messages above/below — looks like a leftover; confirm intended wording.
+    msg "test suites: full + accelerated $accel_text algs + USE_PSA - $removed_text - DHM - BIGNUM"
+
+    make test
+
+    msg "ssl-opt: full + accelerated $accel_text algs + USE_PSA - $removed_text - BIGNUM"
+    tests/ssl-opt.sh
+}
+
+# Common helper used by:
+# - component_test_psa_crypto_config_reference_ecc_no_bignum
+# - component_test_psa_crypto_config_reference_ecc_ffdh_no_bignum
+#
+# The goal is to build and test a reference scenario (i.e. with builtin
+# components) compared to the ones used in
+# common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum() above.
+#
+# It is meant to be used in conjunction with
+# common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum() for drivers'
+# coverage analysis in "analyze_outcomes.py" script.
+#
+# $1: test target, "ECC" or "ECC_DH" (forwarded to the config helper).
+common_test_psa_crypto_config_reference_ecc_ffdh_no_bignum () {
+    test_target="$1"
+
+    # This is an internal helper to simplify text message handling
+    if [ "$test_target" = "ECC_DH" ]; then
+        accel_text="ECC/FFDH"
+    else
+        accel_text="ECC"
+    fi
+
+    msg "build: full + non accelerated $accel_text algs + USE_PSA"
+
+    config_psa_crypto_config_accel_ecc_ffdh_no_bignum 0 "$test_target"
+
+    make
+
+    # NOTE(review): hard-codes "EC" instead of $accel_text like the other two
+    # messages in this function — confirm intended wording.
+    msg "test suites: full + non accelerated EC algs + USE_PSA"
+    make test
+
+    msg "ssl-opt: full + non accelerated $accel_text algs + USE_PSA"
+    tests/ssl-opt.sh
+}
+
+# Accelerated ECC only (FFDH disabled everywhere).
+component_test_psa_crypto_config_accel_ecc_no_bignum () {
+    common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum "ECC"
+}
+
+# Built-in ECC reference for the accelerated-ECC component above.
+component_test_psa_crypto_config_reference_ecc_no_bignum () {
+    common_test_psa_crypto_config_reference_ecc_ffdh_no_bignum "ECC"
+}
+
+# Accelerated ECC and FFDH together.
+component_test_psa_crypto_config_accel_ecc_ffdh_no_bignum () {
+    common_test_psa_crypto_config_accel_ecc_ffdh_no_bignum "ECC_DH"
+}
+
+# Built-in ECC/FFDH reference for the accelerated ECC+FFDH component above.
+component_test_psa_crypto_config_reference_ecc_ffdh_no_bignum () {
+    common_test_psa_crypto_config_reference_ecc_ffdh_no_bignum "ECC_DH"
+}
+
+# Helper for setting common configurations between:
+# - component_test_tfm_config_p256m_driver_accel_ec()
+# - component_test_tfm_config()
+#
+# Installs the TF-M config files as CONFIG_H/CRYPTO_CONFIG_H and appends a
+# few local adjustments (USE_PSA_CRYPTO, FS_IO) for test coverage.
+common_tfm_config () {
+    # Enable TF-M config
+    cp configs/config-tfm.h "$CONFIG_H"
+    # config-tfm.h points at its own crypto config file; undo that so the
+    # crypto_config_profile_medium.h copied below is picked up instead.
+    echo "#undef MBEDTLS_PSA_CRYPTO_CONFIG_FILE" >> "$CONFIG_H"
+    cp configs/ext/crypto_config_profile_medium.h "$CRYPTO_CONFIG_H"
+
+    # Other config adjustment to make the tests pass.
+    # This should probably be adopted upstream.
+    #
+    # - USE_PSA_CRYPTO for PK_HAVE_ECC_KEYS
+    echo "#define MBEDTLS_USE_PSA_CRYPTO" >> "$CONFIG_H"
+
+    # Config adjustment for better test coverage in our environment.
+    # This is not needed just to build and pass tests.
+    #
+    # Enable filesystem I/O for the benefit of PK parse/write tests.
+    echo "#define MBEDTLS_FS_IO" >> "$CONFIG_H"
+}
+
+# Keep this in sync with component_test_tfm_config() as they are both meant
+# to be used in analyze_outcomes.py for driver's coverage analysis.
+#
+# TF-M config with the p256-m driver providing ECDH(E)/ECDSA; checks that the
+# built-in EC/RSA/DHM/bignum code stayed out and that p256-m code is present.
+component_test_tfm_config_p256m_driver_accel_ec () {
+    msg "build: TF-M config + p256m driver + accel ECDH(E)/ECDSA"
+
+    common_tfm_config
+
+    # Build crypto library
+    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -I../tests/include/spe" LDFLAGS="$ASAN_CFLAGS"
+
+    # Make sure any built-in EC alg was not re-enabled by accident (additive config)
+    not grep mbedtls_ecdsa_ library/ecdsa.o
+    not grep mbedtls_ecdh_ library/ecdh.o
+    not grep mbedtls_ecjpake_ library/ecjpake.o
+    # Also ensure that ECP, RSA, DHM or BIGNUM modules were not re-enabled
+    not grep mbedtls_ecp_ library/ecp.o
+    not grep mbedtls_rsa_ library/rsa.o
+    not grep mbedtls_dhm_ library/dhm.o
+    not grep mbedtls_mpi_ library/bignum.o
+    # Check that p256m was built
+    grep -q p256_ecdsa_ library/libmbedcrypto.a
+
+    # In "config-tfm.h" we disabled CIPHER_C tweaking TF-M's configuration
+    # files, so we want to ensure that it has not be re-enabled accidentally.
+    not grep mbedtls_cipher library/cipher.o
+
+    # Run the tests
+    msg "test: TF-M config + p256m driver + accel ECDH(E)/ECDSA"
+    make test
+}
+
+# Keep this in sync with component_test_tfm_config_p256m_driver_accel_ec() as
+# they are both meant to be used in analyze_outcomes.py for driver's coverage
+# analysis.
+#
+# Plain TF-M config (p256-m driver explicitly disabled) as the reference
+# build for the driver-accelerated component above.
+component_test_tfm_config() {
+    common_tfm_config
+
+    # Disable P256M driver, which is on by default, so that analyze_outcomes
+    # can compare this test with test_tfm_config_p256m_driver_accel_ec
+    echo "#undef MBEDTLS_PSA_P256M_DRIVER_ENABLED" >> "$CONFIG_H"
+
+    msg "build: TF-M config"
+    make CFLAGS='-Werror -Wall -Wextra -I../tests/include/spe' tests
+
+    # Check that p256m was not built
+    not grep p256_ecdsa_ library/libmbedcrypto.a
+
+    # In "config-tfm.h" we disabled CIPHER_C tweaking TF-M's configuration
+    # files, so we want to ensure that it has not be re-enabled accidentally.
+    not grep mbedtls_cipher library/cipher.o
+
+    msg "test: TF-M config"
+    make test
+}
+
+# Common helper for component_full_without_ecdhe_ecdsa() and
+# component_full_without_ecdhe_ecdsa_and_tls13() which:
+# - starts from the "full" configuration minus the list of symbols passed in
+# as 1st parameter
+# - build
+# - test only TLS (i.e. test_suite_tls and ssl-opt)
+#
+# $1: whitespace-separated list of config symbols to unset.
+build_full_minus_something_and_test_tls () {
+ symbols_to_disable="$1"
+
+ msg "build: full minus something, test TLS"
+
+ scripts/config.py full
+ # Intentionally unquoted: word-split the whitespace-separated symbol list.
+ for sym in $symbols_to_disable; do
+ echo "Disabling $sym"
+ scripts/config.py unset $sym
+ done
+
+ make
+
+ msg "test: full minus something, test TLS"
+ ( cd tests; ./test_suite_ssl )
+
+ msg "ssl-opt: full minus something, test TLS"
+ tests/ssl-opt.sh
+}
+
+# Full config minus ECDHE-ECDSA key exchange; TLS tests only.
+component_full_without_ecdhe_ecdsa () {
+ build_full_minus_something_and_test_tls "MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED"
+}
+
+# Full config minus ECDHE-ECDSA key exchange and TLS 1.3; TLS tests only.
+component_full_without_ecdhe_ecdsa_and_tls13 () {
+ build_full_minus_something_and_test_tls "MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+ MBEDTLS_SSL_PROTO_TLS1_3"
+}
+
+# This is a helper used by:
+# - component_test_psa_ecc_key_pair_no_derive
+# - component_test_psa_ecc_key_pair_no_generate
+# The goal is to test with all PSA_WANT_KEY_TYPE_xxx_KEY_PAIR_yyy symbols
+# enabled, but one. Input arguments are as follows:
+# - $1 is the key type under test, i.e. ECC/RSA/DH
+# - $2 is the key option to be unset (i.e. generate, derive, etc)
+build_and_test_psa_want_key_pair_partial() {
+ key_type=$1
+ unset_option=$2
+ disabled_psa_want="PSA_WANT_KEY_TYPE_${key_type}_KEY_PAIR_${unset_option}"
+
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO - ${disabled_psa_want}"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+
+ # All the PSA_WANT_KEY_TYPE_xxx_KEY_PAIR_yyy are enabled by default in
+ # crypto_config.h so we just disable the one we don't want.
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset "$disabled_psa_want"
+
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full - MBEDTLS_USE_PSA_CRYPTO - ${disabled_psa_want}"
+ make test
+}
+
+# ECC key pairs with every option except DERIVE.
+component_test_psa_ecc_key_pair_no_derive() {
+ build_and_test_psa_want_key_pair_partial "ECC" "DERIVE"
+}
+
+# ECC key pairs with every option except GENERATE.
+component_test_psa_ecc_key_pair_no_generate() {
+ build_and_test_psa_want_key_pair_partial "ECC" "GENERATE"
+}
+
+# Configure crypto_full for the accelerated-RSA components.
+# $1: 1 to strip built-in RSA (driver-only build), 0 for the reference build.
+config_psa_crypto_accel_rsa () {
+ driver_only=$1
+
+ # Start from crypto_full config (no X.509, no TLS)
+ helper_libtestdriver1_adjust_config "crypto_full"
+
+ if [ "$driver_only" -eq 1 ]; then
+ # Remove RSA support and its dependencies
+ scripts/config.py unset MBEDTLS_RSA_C
+ scripts/config.py unset MBEDTLS_PKCS1_V15
+ scripts/config.py unset MBEDTLS_PKCS1_V21
+
+ # We need PEM parsing in the test library as well to support the import
+ # of PEM encoded RSA keys.
+ scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_PEM_PARSE_C
+ scripts/config.py -f "$CONFIG_TEST_DRIVER_H" set MBEDTLS_BASE64_C
+ fi
+}
+
+# crypto_full with all RSA algorithms and key-type operations moved to the
+# libtestdriver1 driver; verifies built-in RSA stayed out of the build.
+# Pair of component_test_psa_crypto_config_reference_rsa_crypto for
+# analyze_outcomes.py.
+component_test_psa_crypto_config_accel_rsa_crypto () {
+ msg "build: crypto_full with accelerated RSA"
+
+ loc_accel_list="ALG_RSA_OAEP ALG_RSA_PSS \
+ ALG_RSA_PKCS1V15_CRYPT ALG_RSA_PKCS1V15_SIGN \
+ KEY_TYPE_RSA_PUBLIC_KEY \
+ KEY_TYPE_RSA_KEY_PAIR_BASIC \
+ KEY_TYPE_RSA_KEY_PAIR_GENERATE \
+ KEY_TYPE_RSA_KEY_PAIR_IMPORT \
+ KEY_TYPE_RSA_KEY_PAIR_EXPORT"
+
+ # Configure
+ # ---------
+
+ config_psa_crypto_accel_rsa 1
+
+ # Build
+ # -----
+
+ # These hashes are needed for unit tests.
+ loc_extra_list="ALG_SHA_1 ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+ ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512 ALG_MD5"
+ helper_libtestdriver1_make_drivers "$loc_accel_list" "$loc_extra_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Make sure this was not re-enabled by accident (additive config)
+ not grep mbedtls_rsa library/rsa.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: crypto_full with accelerated RSA"
+ make test
+}
+
+# Reference (built-in RSA) counterpart of
+# component_test_psa_crypto_config_accel_rsa_crypto for analyze_outcomes.py.
+component_test_psa_crypto_config_reference_rsa_crypto () {
+ msg "build: crypto_full with non-accelerated RSA"
+
+ # Configure
+ # ---------
+ config_psa_crypto_accel_rsa 0
+
+ # Build
+ # -----
+ make
+
+ # Run the tests
+ # -------------
+ msg "test: crypto_full with non-accelerated RSA"
+ make test
+}
+
+# This is a temporary test to verify that full RSA support is present even when
+# only a single new symbol (PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC) is defined.
+#
+# Uses MBEDTLS_TEST_OUTCOME_FILE (creating a temporary one if unset) to check
+# that a representative RSA signature test case actually ran and passed.
+component_test_new_psa_want_key_pair_symbol() {
+ msg "Build: crypto config - MBEDTLS_RSA_C + PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC"
+
+ # Create a temporary output file unless there is already one set
+ if [ "$MBEDTLS_TEST_OUTCOME_FILE" ]; then
+ REMOVE_OUTCOME_ON_EXIT="no"
+ else
+ REMOVE_OUTCOME_ON_EXIT="yes"
+ MBEDTLS_TEST_OUTCOME_FILE="$PWD/out.csv"
+ export MBEDTLS_TEST_OUTCOME_FILE
+ fi
+
+ # Start from crypto configuration
+ scripts/config.py crypto
+
+ # Remove RSA support and its dependencies
+ scripts/config.py unset MBEDTLS_PKCS1_V15
+ scripts/config.py unset MBEDTLS_PKCS1_V21
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_RSA_C
+ scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+
+ # Enable PSA support
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+
+ # Keep only PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC enabled in order to ensure
+ # that proper translation is done in crypto_legacy.h.
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_IMPORT
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_EXPORT
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE
+
+ make
+
+ msg "Test: crypto config - MBEDTLS_RSA_C + PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC"
+ make test
+
+ # Parse only 1 relevant line from the outcome file, i.e. a test which is
+ # performing RSA signature. Grep the file directly (no need for cat) and
+ # quote the path: it is derived from $PWD which may contain spaces.
+ msg "Verify that 'RSA PKCS1 Sign #1 (SHA512, 1536 bits RSA)' is PASS"
+ grep 'RSA PKCS1 Sign #1 (SHA512, 1536 bits RSA)' "$MBEDTLS_TEST_OUTCOME_FILE" | grep -q "PASS"
+
+ if [ "$REMOVE_OUTCOME_ON_EXIT" == "yes" ]; then
+ rm "$MBEDTLS_TEST_OUTCOME_FILE"
+ fi
+}
+
+# Default config with all hashes provided by the test driver and the built-in
+# hash modules disabled; checks none of them were re-enabled via config_psa.h.
+component_test_psa_crypto_config_accel_hash () {
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated hash"
+
+ loc_accel_list="ALG_MD5 ALG_RIPEMD160 ALG_SHA_1 \
+ ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+ ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+ # Configure
+ # ---------
+
+ # Start from default config (no USE_PSA)
+ helper_libtestdriver1_adjust_config "default"
+
+ # Disable the things that are being accelerated
+ scripts/config.py unset MBEDTLS_MD5_C
+ scripts/config.py unset MBEDTLS_RIPEMD160_C
+ scripts/config.py unset MBEDTLS_SHA1_C
+ scripts/config.py unset MBEDTLS_SHA224_C
+ scripts/config.py unset MBEDTLS_SHA256_C
+ scripts/config.py unset MBEDTLS_SHA384_C
+ scripts/config.py unset MBEDTLS_SHA512_C
+ scripts/config.py unset MBEDTLS_SHA3_C
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # There's a risk of something getting re-enabled via config_psa.h;
+ # make sure it did not happen. Note: it's OK for MD_C to be enabled.
+ not grep mbedtls_md5 library/md5.o
+ not grep mbedtls_sha1 library/sha1.o
+ not grep mbedtls_sha256 library/sha256.o
+ not grep mbedtls_sha512 library/sha512.o
+ not grep mbedtls_ripemd160 library/ripemd160.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated hash"
+ make test
+}
+
+# Same driver list as component_test_psa_crypto_config_accel_hash but the
+# built-in hash modules stay enabled, so both dispatch paths are exercised.
+component_test_psa_crypto_config_accel_hash_keep_builtins () {
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated+builtin hash"
+ # This component ensures that all the test cases for
+ # md_psa_dynamic_dispatch with legacy+driver in test_suite_md are run.
+
+ loc_accel_list="ALG_MD5 ALG_RIPEMD160 ALG_SHA_1 \
+ ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+ ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+ # Start from default config (no USE_PSA)
+ helper_libtestdriver1_adjust_config "default"
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated+builtin hash"
+ make test
+}
+
+# Auxiliary function to build config for hashes with and without drivers.
+# $1: 1 to disable built-in hash modules (driver-only), 0 for reference.
+config_psa_crypto_hash_use_psa () {
+ driver_only="$1"
+ # start with config full for maximum coverage (also enables USE_PSA)
+ helper_libtestdriver1_adjust_config "full"
+ if [ "$driver_only" -eq 1 ]; then
+ # disable the built-in implementation of hashes
+ scripts/config.py unset MBEDTLS_MD5_C
+ scripts/config.py unset MBEDTLS_RIPEMD160_C
+ scripts/config.py unset MBEDTLS_SHA1_C
+ scripts/config.py unset MBEDTLS_SHA224_C
+ scripts/config.py unset MBEDTLS_SHA256_C # see external RNG below
+ scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+ scripts/config.py unset MBEDTLS_SHA384_C
+ scripts/config.py unset MBEDTLS_SHA512_C
+ scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT
+ scripts/config.py unset MBEDTLS_SHA3_C
+ fi
+}
+
+# Note that component_test_psa_crypto_config_reference_hash_use_psa
+# is related to this component and both components need to be kept in sync.
+# For details please see comments for component_test_psa_crypto_config_reference_hash_use_psa.
+#
+# Full config (USE_PSA on) with all hashes driver-only; runs unit tests,
+# ssl-opt.sh and compat.sh so the outcome file covers TLS dependencies too.
+component_test_psa_crypto_config_accel_hash_use_psa () {
+ msg "test: full with accelerated hashes"
+
+ loc_accel_list="ALG_MD5 ALG_RIPEMD160 ALG_SHA_1 \
+ ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+ ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+ # Configure
+ # ---------
+
+ config_psa_crypto_hash_use_psa 1
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # There's a risk of something getting re-enabled via config_psa.h;
+ # make sure it did not happen. Note: it's OK for MD_C to be enabled.
+ not grep mbedtls_md5 library/md5.o
+ not grep mbedtls_sha1 library/sha1.o
+ not grep mbedtls_sha256 library/sha256.o
+ not grep mbedtls_sha512 library/sha512.o
+ not grep mbedtls_ripemd160 library/ripemd160.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: full with accelerated hashes"
+ make test
+
+ # This is mostly useful so that we can later compare outcome files with
+ # the reference config in analyze_outcomes.py, to check that the
+ # dependency declarations in ssl-opt.sh and in TLS code are correct.
+ msg "test: ssl-opt.sh, full with accelerated hashes"
+ tests/ssl-opt.sh
+
+ # This is to make sure all ciphersuites are exercised, but we don't need
+ # interop testing (besides, we already got some from ssl-opt.sh).
+ msg "test: compat.sh, full with accelerated hashes"
+ tests/compat.sh -p mbedTLS -V YES
+}
+
+# This component provides reference configuration for test_psa_crypto_config_accel_hash_use_psa
+# without accelerated hash. The outcome from both components are used by the analyze_outcomes.py
+# script to find regression in test coverage when accelerated hash is used (tests and ssl-opt).
+# Both components need to be kept in sync.
+component_test_psa_crypto_config_reference_hash_use_psa() {
+ msg "test: full without accelerated hashes"
+
+ config_psa_crypto_hash_use_psa 0
+
+ make
+
+ msg "test: full without accelerated hashes"
+ make test
+
+ msg "test: ssl-opt.sh, full without accelerated hashes"
+ tests/ssl-opt.sh
+}
+
+# Auxiliary function to build config for HMAC with and without drivers.
+# $1: 1 to disable built-in MD_C/hashes (driver-only), 0 for reference.
+config_psa_crypto_hmac_use_psa () {
+ driver_only="$1"
+ # start with config full for maximum coverage (also enables USE_PSA)
+ helper_libtestdriver1_adjust_config "full"
+
+ if [ "$driver_only" -eq 1 ]; then
+ # Disable MD_C in order to disable the builtin support for HMAC. MD_LIGHT
+ # is still enabled though (for ENTROPY_C among others).
+ scripts/config.py unset MBEDTLS_MD_C
+ # Disable also the builtin hashes since they are supported by the driver
+ # and MD module is able to perform PSA dispatching.
+ scripts/config.py unset-all MBEDTLS_SHA
+ scripts/config.py unset MBEDTLS_MD5_C
+ scripts/config.py unset MBEDTLS_RIPEMD160_C
+ fi
+
+ # Direct dependencies of MD_C. We disable them also in the reference
+ # component to work with the same set of features.
+ scripts/config.py unset MBEDTLS_PKCS7_C
+ scripts/config.py unset MBEDTLS_PKCS5_C
+ scripts/config.py unset MBEDTLS_HMAC_DRBG_C
+ scripts/config.py unset MBEDTLS_HKDF_C
+ # Dependencies of HMAC_DRBG
+ scripts/config.py unset MBEDTLS_ECDSA_DETERMINISTIC
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_DETERMINISTIC_ECDSA
+}
+
+# Full config with HMAC (and the underlying hashes) provided by the driver;
+# reference counterpart: component_test_psa_crypto_config_reference_hmac.
+component_test_psa_crypto_config_accel_hmac() {
+ msg "test: full with accelerated hmac"
+
+ loc_accel_list="ALG_HMAC KEY_TYPE_HMAC \
+ ALG_MD5 ALG_RIPEMD160 ALG_SHA_1 \
+ ALG_SHA_224 ALG_SHA_256 ALG_SHA_384 ALG_SHA_512 \
+ ALG_SHA3_224 ALG_SHA3_256 ALG_SHA3_384 ALG_SHA3_512"
+
+ # Configure
+ # ---------
+
+ config_psa_crypto_hmac_use_psa 1
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Ensure that built-in support for HMAC is disabled.
+ not grep mbedtls_md_hmac library/md.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: full with accelerated hmac"
+ make test
+}
+
+# Reference (built-in HMAC) counterpart of
+# component_test_psa_crypto_config_accel_hmac for analyze_outcomes.py.
+component_test_psa_crypto_config_reference_hmac() {
+ msg "test: full without accelerated hmac"
+
+ config_psa_crypto_hmac_use_psa 0
+
+ make
+
+ msg "test: full without accelerated hmac"
+ make test
+}
+
+component_test_psa_crypto_config_accel_des () {
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated DES"
+
+ # Albeit this components aims at accelerating DES which should only support
+ # CBC and ECB modes, we need to accelerate more than that otherwise DES_C
+ # would automatically be re-enabled by "config_adjust_legacy_from_psa.c"
+ loc_accel_list="ALG_ECB_NO_PADDING ALG_CBC_NO_PADDING ALG_CBC_PKCS7 \
+ ALG_CTR ALG_CFB ALG_OFB ALG_XTS ALG_CMAC \
+ KEY_TYPE_DES"
+
+ # Note: we cannot accelerate all ciphers' key types otherwise we would also
+ # have to either disable CCM/GCM or accelerate them, but that's out of scope
+ # of this component. This limitation will be addressed by #8598.
+
+ # Configure
+ # ---------
+
+ # Start from the full config
+ helper_libtestdriver1_adjust_config "full"
+
+ # Disable the things that are being accelerated
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+ scripts/config.py unset MBEDTLS_CIPHER_PADDING_PKCS7
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CTR
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CFB
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_OFB
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_XTS
+ scripts/config.py unset MBEDTLS_DES_C
+ scripts/config.py unset MBEDTLS_CMAC_C
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Make sure this was not re-enabled by accident (additive config)
+ not grep mbedtls_des* library/des.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated DES"
+ make test
+}
+
+# Full config with the AEAD algorithms (GCM/CCM/ChaCha20-Poly1305) and their
+# key types provided by the driver; verifies built-in AEADs stayed disabled.
+component_test_psa_crypto_config_accel_aead () {
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated AEAD"
+
+ loc_accel_list="ALG_GCM ALG_CCM ALG_CHACHA20_POLY1305 \
+ KEY_TYPE_AES KEY_TYPE_CHACHA20 KEY_TYPE_ARIA KEY_TYPE_CAMELLIA"
+
+ # Configure
+ # ---------
+
+ # Start from full config
+ helper_libtestdriver1_adjust_config "full"
+
+ # Disable things that are being accelerated
+ scripts/config.py unset MBEDTLS_GCM_C
+ scripts/config.py unset MBEDTLS_CCM_C
+ scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+
+ # Disable CCM_STAR_NO_TAG because this re-enables CCM_C.
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Make sure this was not re-enabled by accident (additive config)
+ not grep mbedtls_ccm library/ccm.o
+ not grep mbedtls_gcm library/gcm.o
+ not grep mbedtls_chachapoly library/chachapoly.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: MBEDTLS_PSA_CRYPTO_CONFIG with accelerated AEAD"
+ make test
+}
+
+# This is a common configuration function used in:
+# - component_test_psa_crypto_config_accel_cipher_aead_cmac
+# - component_test_psa_crypto_config_reference_cipher_aead_cmac
+common_psa_crypto_config_accel_cipher_aead_cmac() {
+ # Start from the full config
+ helper_libtestdriver1_adjust_config "full"
+
+ # NIST_KW depends directly on AES_C; drop it in both variants.
+ scripts/config.py unset MBEDTLS_NIST_KW_C
+}
+
+# The 2 following test components, i.e.
+# - component_test_psa_crypto_config_accel_cipher_aead_cmac
+# - component_test_psa_crypto_config_reference_cipher_aead_cmac
+# are meant to be used together in analyze_outcomes.py script in order to test
+# driver's coverage for ciphers and AEADs.
+component_test_psa_crypto_config_accel_cipher_aead_cmac () {
+ msg "build: full config with accelerated cipher inc. AEAD and CMAC"
+
+ loc_accel_list="ALG_ECB_NO_PADDING ALG_CBC_NO_PADDING ALG_CBC_PKCS7 ALG_CTR ALG_CFB \
+ ALG_OFB ALG_XTS ALG_STREAM_CIPHER ALG_CCM_STAR_NO_TAG \
+ ALG_GCM ALG_CCM ALG_CHACHA20_POLY1305 ALG_CMAC \
+ KEY_TYPE_DES KEY_TYPE_AES KEY_TYPE_ARIA KEY_TYPE_CHACHA20 KEY_TYPE_CAMELLIA"
+
+ # Configure
+ # ---------
+
+ common_psa_crypto_config_accel_cipher_aead_cmac
+
+ # Disable the things that are being accelerated
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+ scripts/config.py unset MBEDTLS_CIPHER_PADDING_PKCS7
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CTR
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CFB
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_OFB
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_XTS
+ scripts/config.py unset MBEDTLS_GCM_C
+ scripts/config.py unset MBEDTLS_CCM_C
+ scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+ scripts/config.py unset MBEDTLS_CMAC_C
+ scripts/config.py unset MBEDTLS_DES_C
+ scripts/config.py unset MBEDTLS_AES_C
+ scripts/config.py unset MBEDTLS_ARIA_C
+ scripts/config.py unset MBEDTLS_CHACHA20_C
+ scripts/config.py unset MBEDTLS_CAMELLIA_C
+
+ # Disable CIPHER_C entirely as all ciphers/AEADs are accelerated and PSA
+ # does not depend on it.
+ scripts/config.py unset MBEDTLS_CIPHER_C
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Make sure this was not re-enabled by accident (additive config)
+ not grep mbedtls_cipher library/cipher.o
+ not grep mbedtls_des library/des.o
+ not grep mbedtls_aes library/aes.o
+ not grep mbedtls_aria library/aria.o
+ not grep mbedtls_camellia library/camellia.o
+ not grep mbedtls_ccm library/ccm.o
+ not grep mbedtls_gcm library/gcm.o
+ not grep mbedtls_chachapoly library/chachapoly.o
+ not grep mbedtls_cmac library/cmac.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: full config with accelerated cipher inc. AEAD and CMAC"
+ make test
+
+ msg "ssl-opt: full config with accelerated cipher inc. AEAD and CMAC"
+ tests/ssl-opt.sh
+
+ msg "compat.sh: full config with accelerated cipher inc. AEAD and CMAC"
+ tests/compat.sh -V NO -p mbedTLS
+}
+
+# Reference (built-in) counterpart of
+# component_test_psa_crypto_config_accel_cipher_aead_cmac; runs the same
+# suites so analyze_outcomes.py can compare the two.
+component_test_psa_crypto_config_reference_cipher_aead_cmac () {
+ msg "build: full config with non-accelerated cipher inc. AEAD and CMAC"
+ common_psa_crypto_config_accel_cipher_aead_cmac
+
+ make
+
+ msg "test: full config with non-accelerated cipher inc. AEAD and CMAC"
+ make test
+
+ msg "ssl-opt: full config with non-accelerated cipher inc. AEAD and CMAC"
+ tests/ssl-opt.sh
+
+ msg "compat.sh: full config with non-accelerated cipher inc. AEAD and CMAC"
+ tests/compat.sh -V NO -p mbedTLS
+}
+
+# Shared configuration for the block_cipher dispatch components.
+# $1: 1 to disable the built-in block ciphers (PSA-dispatch build),
+#     0 for the legacy-dispatch reference build.
+common_block_cipher_dispatch() {
+ TEST_WITH_DRIVER="$1"
+
+ # Start from the full config
+ helper_libtestdriver1_adjust_config "full"
+
+ if [ "$TEST_WITH_DRIVER" -eq 1 ]; then
+ # Disable key types that are accelerated (there is no legacy equivalent
+ # symbol for ECB)
+ scripts/config.py unset MBEDTLS_AES_C
+ scripts/config.py unset MBEDTLS_ARIA_C
+ scripts/config.py unset MBEDTLS_CAMELLIA_C
+ fi
+
+ # Disable cipher's modes that, when not accelerated, cause
+ # legacy key types to be re-enabled in "config_adjust_legacy_from_psa.h".
+ # Keep this also in the reference component in order to skip the same tests
+ # that were skipped in the accelerated one.
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CTR
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CFB
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_OFB
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_NO_PADDING
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_PKCS7
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CMAC
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CCM_STAR_NO_TAG
+
+ # Disable direct dependency on AES_C
+ scripts/config.py unset MBEDTLS_NIST_KW_C
+
+ # Prevent the cipher module from using deprecated PSA path. The reason is
+ # that otherwise there will be tests relying on "aes_info" (defined in
+ # "cipher_wrap.c") whose functions are not available when AES_C is
+ # not defined. ARIA and Camellia are not a problem in this case because
+ # the PSA path is not tested for these key types.
+ scripts/config.py set MBEDTLS_DEPRECATED_REMOVED
+}
+
+# block_cipher module dispatching to PSA: ECB and the block-cipher key types
+# are driver-only; checks the built-in ciphers stayed out of the build.
+component_test_full_block_cipher_psa_dispatch () {
+ msg "build: full + PSA dispatch in block_cipher"
+
+ loc_accel_list="ALG_ECB_NO_PADDING \
+ KEY_TYPE_AES KEY_TYPE_ARIA KEY_TYPE_CAMELLIA"
+
+ # Configure
+ # ---------
+
+ common_block_cipher_dispatch 1
+
+ # Build
+ # -----
+
+ helper_libtestdriver1_make_drivers "$loc_accel_list"
+
+ helper_libtestdriver1_make_main "$loc_accel_list"
+
+ # Make sure disabled components were not re-enabled by accident (additive
+ # config)
+ not grep mbedtls_aes_ library/aes.o
+ not grep mbedtls_aria_ library/aria.o
+ not grep mbedtls_camellia_ library/camellia.o
+
+ # Run the tests
+ # -------------
+
+ msg "test: full + PSA dispatch in block_cipher"
+ make test
+}
+
+# This is the reference component of component_test_full_block_cipher_psa_dispatch
+component_test_full_block_cipher_legacy_dispatch () {
+ msg "build: full + legacy dispatch in block_cipher"
+
+ common_block_cipher_dispatch 0
+
+ make
+
+ msg "test: full + legacy dispatch in block_cipher"
+ make test
+}
+
+# Full config with ChaCha20-Poly1305 disabled on both legacy and PSA sides.
+component_test_aead_chachapoly_disabled() {
+ msg "build: full minus CHACHAPOLY"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CHACHA20_POLY1305
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full minus CHACHAPOLY"
+ make test
+}
+
+# Full config with CCM as the only AEAD (ChaCha20-Poly1305 and GCM disabled
+# on both legacy and PSA sides).
+component_test_aead_only_ccm() {
+ msg "build: full minus CHACHAPOLY and GCM"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_CHACHAPOLY_C
+ scripts/config.py unset MBEDTLS_GCM_C
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CHACHA20_POLY1305
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_GCM
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full minus CHACHAPOLY and GCM"
+ make test
+}
+
+# Build and test the minimal CCM + AES + SHA256 crypto configuration shipped
+# in configs/, using the test-driver config as the base mbedtls config.
+component_test_ccm_aes_sha256() {
+ msg "build: CCM + AES + SHA256 configuration"
+
+ cp "$CONFIG_TEST_DRIVER_H" "$CONFIG_H"
+ cp configs/crypto-config-ccm-aes-sha256.h "$CRYPTO_CONFIG_H"
+
+ make
+
+ msg "test: CCM + AES + SHA256 configuration"
+ make test
+}
+
+# This should be renamed to test and updated once the accelerator ECDH code is in place and ready to test.
+# Build-only check: PSA_WANT_ALG_ECDH satisfied by an accelerator, ECDH_C off.
+component_build_psa_accel_alg_ecdh() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_ECDH without MBEDTLS_ECDH_C"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py unset MBEDTLS_ECDH_C
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED
+ scripts/config.py unset MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_ECDH -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator HMAC code is in place and ready to test.
+# Build-only check: PSA_WANT_ALG_HMAC satisfied by an accelerator.
+component_build_psa_accel_alg_hmac() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_HMAC"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_HMAC -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator HKDF code is in place and ready to test.
+# Build-only check: PSA_WANT_ALG_HKDF satisfied by an accelerator, HKDF_C off.
+component_build_psa_accel_alg_hkdf() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_HKDF without MBEDTLS_HKDF_C"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_HKDF_C
+ # Make sure to unset TLS1_3 since it requires HKDF_C and will not build properly without it.
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_HKDF -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator MD5 code is in place and ready to test.
+# Build-only check: MD5 as the sole PSA hash, satisfied by an accelerator.
+# LMS is unset because it requires SHA-256, which is disabled here.
+component_build_psa_accel_alg_md5() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_MD5 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_512
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
+ scripts/config.py unset MBEDTLS_LMS_C
+ scripts/config.py unset MBEDTLS_LMS_PRIVATE
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_MD5 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator RIPEMD160 code is in place and ready to test.
+# Build-only check: RIPEMD160 as the sole PSA hash, satisfied by an accelerator.
+component_build_psa_accel_alg_ripemd160() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_RIPEMD160 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_512
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
+ scripts/config.py unset MBEDTLS_LMS_C
+ scripts/config.py unset MBEDTLS_LMS_PRIVATE
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RIPEMD160 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA1 code is in place and ready to test.
+# Build-only check: SHA-1 as the sole PSA hash, satisfied by an accelerator.
+component_build_psa_accel_alg_sha1() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_SHA_1 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_512
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
+ scripts/config.py unset MBEDTLS_LMS_C
+ scripts/config.py unset MBEDTLS_LMS_PRIVATE
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_1 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA224 code is in place and ready to test.
+# Build-only check: SHA-224 accelerated. SHA-256 stays enabled (SHA-224 is
+# provided by the SHA-256 module), so LMS does not need to be unset here.
+component_build_psa_accel_alg_sha224() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_SHA_224 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_512
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_224 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA256 code is in place and ready to test.
+# Build-only check: SHA-256 as the sole PSA hash, satisfied by an accelerator.
+component_build_psa_accel_alg_sha256() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_SHA_256 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_512
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_256 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
+# This should be renamed to test and updated once the accelerator SHA384 code is in place and ready to test.
+# Build-only check: SHA-384 as the sole PSA hash, satisfied by an accelerator.
+component_build_psa_accel_alg_sha384() {
+ msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_SHA_384 - other hashes"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
+ scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_256
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
+ scripts/config.py unset MBEDTLS_LMS_C
+ scripts/config.py unset MBEDTLS_LMS_PRIVATE
+ # Need to define the correct symbol and include the test driver header path in order to build with the test driver
+ make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_384 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
+}
+
# This should be renamed to test and updated once the accelerator SHA512 code is in place and ready to test.
component_build_psa_accel_alg_sha512() {
    # Build the full config with SHA-512 routed through the PSA test driver,
    # with all other PSA hashes disabled.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_SHA_512 - other hashes"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_MD5
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RIPEMD160
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_1
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_224
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_256
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_SHA_384
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_TLS12_ECJPAKE_TO_PMS
    # NOTE(review): LMS is disabled too, presumably because it depends on
    # SHA-256, which is unset above — confirm against config dependencies.
    scripts/config.py unset MBEDTLS_LMS_C
    scripts/config.py unset MBEDTLS_LMS_PRIVATE
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_SHA_512 -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_alg_rsa_pkcs1v15_crypt() {
    # Build the full config with only RSA PKCS#1 v1.5 encryption enabled in
    # PSA, accelerated via the test driver; the other RSA paddings are off.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_RSA_PKCS1V15_CRYPT + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_PKCS1V15_CRYPT 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_OAEP
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PSS
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_CRYPT -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_alg_rsa_pkcs1v15_sign() {
    # Build the full config with only RSA PKCS#1 v1.5 signatures enabled in
    # PSA, accelerated via the test driver; the other RSA paddings are off.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_RSA_PKCS1V15_SIGN + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_PKCS1V15_SIGN 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_OAEP
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PSS
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PKCS1V15_SIGN -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_alg_rsa_oaep() {
    # Build the full config with only RSA OAEP enabled in PSA, accelerated
    # via the test driver; the other RSA paddings are off.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_RSA_OAEP + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_OAEP 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PSS
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_OAEP -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_alg_rsa_pss() {
    # Build the full config with only RSA-PSS enabled in PSA, accelerated
    # via the test driver; the other RSA paddings are off.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_ALG_RSA_PSS + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_PSS 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_CRYPT
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_PKCS1V15_SIGN
    scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_RSA_OAEP
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_ALG_RSA_PSS -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_key_type_rsa_key_pair() {
    # Build the full config with all RSA key-pair sub-capabilities (basic,
    # import, export, generate) plus RSA-PSS enabled, with the key-pair key
    # type accelerated via the test driver.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_xxx + PSA_WANT_ALG_RSA_PSS"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_PSS 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_BASIC 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_IMPORT 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_EXPORT 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_KEY_TYPE_RSA_KEY_PAIR_GENERATE 1
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_KEY_PAIR -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
# This should be renamed to test and updated once the accelerator RSA code is in place and ready to test.
component_build_psa_accel_key_type_rsa_public_key() {
    # Build the full config with the RSA public-key key type plus RSA-PSS
    # enabled, with the public-key key type accelerated via the test driver.
    msg "build: full - MBEDTLS_USE_PSA_CRYPTO + PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY + PSA_WANT_ALG_RSA_PSS"
    scripts/config.py full
    scripts/config.py unset MBEDTLS_USE_PSA_CRYPTO
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_ALG_RSA_PSS 1
    scripts/config.py -f "$CRYPTO_CONFIG_H" set PSA_WANT_KEY_TYPE_RSA_PUBLIC_KEY 1
    # Need to define the correct symbol and include the test driver header path in order to build with the test driver
    make CC=$ASAN_CC CFLAGS="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST -DMBEDTLS_PSA_ACCEL_KEY_TYPE_RSA_PUBLIC_KEY -I../tests/include" LDFLAGS="$ASAN_CFLAGS"
}
+
+
support_build_tfm_armcc () {
    # Gate for component_build_tfm_armcc: same prerequisite as any other
    # Arm Compiler build, so delegate to the generic armcc check.
    support_build_armcc
}
+
component_build_tfm_armcc() {
    # test the TF-M configuration can build cleanly with various warning flags enabled
    # Destructive: overwrites the working config with the TF-M reference config.
    cp configs/config-tfm.h "$CONFIG_H"

    msg "build: TF-M config, armclang armv7-m thumb2"
    armc6_build_test "--target=arm-arm-none-eabi -march=armv7-m -mthumb -Os -std=c99 -Werror -Wall -Wextra -Wwrite-strings -Wpointer-arith -Wimplicit-fallthrough -Wshadow -Wvla -Wformat=2 -Wno-format-nonliteral -Wshadow -Wasm-operand-widths -Wunused -I../tests/include/spe"
}
+
component_build_tfm() {
    # Check that the TF-M configuration can build cleanly with various
    # warning flags enabled. We don't build or run tests, since the
    # TF-M configuration needs a TF-M platform. A tweaked version of
    # the configuration that works on mainstream platforms is in
    # configs/config-tfm.h, tested via test-ref-configs.pl.
    cp configs/config-tfm.h "$CONFIG_H"

    # Library-only builds ("make lib"), so no linking is attempted for the
    # cross targets.
    msg "build: TF-M config, clang, armv7-m thumb2"
    make lib CC="clang" CFLAGS="--target=arm-linux-gnueabihf -march=armv7-m -mthumb -Os -std=c99 -Werror -Wall -Wextra -Wwrite-strings -Wpointer-arith -Wimplicit-fallthrough -Wshadow -Wvla -Wformat=2 -Wno-format-nonliteral -Wshadow -Wasm-operand-widths -Wunused -I../tests/include/spe"

    msg "build: TF-M config, gcc native build"
    make clean
    make lib CC="gcc" CFLAGS="-Os -std=c99 -Werror -Wall -Wextra -Wwrite-strings -Wpointer-arith -Wshadow -Wvla -Wformat=2 -Wno-format-nonliteral -Wshadow -Wformat-signedness -Wlogical-op -I../tests/include/spe"
}
+
# Test that the given .o file builds with all (valid) combinations of the given options.
#
# Syntax: build_test_config_combos FILE VALIDATOR_FUNCTION OPT1 OPT2 ...
#
# The validator function is the name of a function to validate the combination of options.
# It may be "" if all combinations are valid.
# It receives a string containing a combination of options, as passed to the compiler,
# e.g. "-DOPT1 -DOPT2 ...". It must return 0 iff the combination is valid, non-zero if invalid.
#
# Side effect: leaves the current directory changed to the directory
# containing FILE; callers are responsible for restoring it.
build_test_config_combos() {
    file=$1
    shift
    validate_options=$1
    shift
    options=("$@")

    # clear all of the options so that they can be overridden on the clang commandline
    for opt in "${options[@]}"; do
        ./scripts/config.py unset "${opt}"
    done

    # enter the directory containing the target file & strip the dir from the filename
    # (quoted so paths containing spaces or glob characters cannot break the cd)
    cd "$(dirname "${file}")"
    file=$(basename "${file}")

    # The most common issue is unused variables/functions, so ensure -Wunused is set.
    warning_flags="-Werror -Wall -Wextra -Wwrite-strings -Wpointer-arith -Wimplicit-fallthrough -Wshadow -Wvla -Wformat=2 -Wno-format-nonliteral -Wshadow -Wasm-operand-widths -Wunused"

    # Extract the command generated by the Makefile to build the target file.
    # This ensures that we have any include paths, macro definitions, etc
    # that may be applied by make.
    # Add -fsyntax-only as we only want a syntax check and don't need to generate a file.
    compile_cmd="clang \$(LOCAL_CFLAGS) ${warning_flags} -fsyntax-only -c"

    # Fail early if the temporary makefile cannot be created, rather than
    # redirecting into an empty path below.
    makefile=$(TMPDIR=. mktemp) || return 1
    deps=""

    len=${#options[@]}
    source_file=${file%.o}.c

    targets=0
    echo 'include Makefile' >"${makefile}"

    for ((i = 0; i < $((2**len)); i++)); do
        # generate each of 2^n combinations of options
        # each bit of $i is used to determine if options[i] will be set or not
        target="t"
        clang_args=""
        for ((j = 0; j < len; j++)); do
            if (((i >> j) & 1)); then
                opt=-D${options[$j]}
                clang_args="${clang_args} ${opt}"
                target="${target}${opt}"
            fi
        done

        # if combination is not known to be invalid, add it to the makefile
        if [[ -z $validate_options ]] || $validate_options "${clang_args}"; then
            cmd="${compile_cmd} ${clang_args}"
            echo "${target}: ${source_file}; $cmd ${source_file}" >> "${makefile}"

            deps="${deps} ${target}"
            ((++targets))
        fi
    done

    echo "build_test_config_combos: ${deps}" >> "${makefile}"

    # execute all of the commands via Make (probably in parallel)
    make -s -f "${makefile}" build_test_config_combos
    echo "$targets targets checked"

    # clean up the temporary makefile
    rm "${makefile}"
}
+
validate_aes_config_variations() {
    # Decide whether one combination of -D options (passed as a single
    # string in $1) is a buildable AES configuration.
    # Returns 0 if valid, 1 if invalid.

    # Combinations that do not force hardware-only AES are always valid.
    if [[ "$1" != *"MBEDTLS_AES_USE_HARDWARE_ONLY"* ]]; then
        return 0
    fi

    # Hardware-only AES cannot be combined with the VIA Padlock driver.
    if [[ "$1" == *"MBEDTLS_PADLOCK_C"* ]]; then
        return 1
    fi

    # Hardware-only is accepted only on aarch64 when MBEDTLS_AESCE_C is not
    # among the options, or on x86_64 when MBEDTLS_AESNI_C is not; every
    # other host/option pairing is rejected.
    if [[ ("$HOSTTYPE" == "aarch64" && "$1" != *"MBEDTLS_AESCE_C"*) || \
          ("$HOSTTYPE" == "x86_64" && "$1" != *"MBEDTLS_AESNI_C"*) ]]; then
        return 0
    fi
    return 1
}
+
component_build_aes_variations() {
    # 18s - around 90ms per clang invocation on M1 Pro
    #
    # aes.o has many #if defined(...) guards that intersect in complex ways.
    # Test that all the combinations build cleanly.

    # Remember the tree root: build_test_config_combos cd's into the target
    # file's directory, so we must return here before the second run.
    MBEDTLS_ROOT_DIR="$PWD"
    msg "build: aes.o for all combinations of relevant config options"

    build_test_config_combos library/aes.o validate_aes_config_variations \
        "MBEDTLS_AES_SETKEY_ENC_ALT" "MBEDTLS_AES_DECRYPT_ALT" \
        "MBEDTLS_AES_ROM_TABLES" "MBEDTLS_AES_ENCRYPT_ALT" "MBEDTLS_AES_SETKEY_DEC_ALT" \
        "MBEDTLS_AES_FEWER_TABLES" "MBEDTLS_PADLOCK_C" "MBEDTLS_AES_USE_HARDWARE_ONLY" \
        "MBEDTLS_AESNI_C" "MBEDTLS_AESCE_C" "MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH"

    cd "$MBEDTLS_ROOT_DIR"
    msg "build: aes.o for all combinations of relevant config options + BLOCK_CIPHER_NO_DECRYPT"

    # MBEDTLS_BLOCK_CIPHER_NO_DECRYPT is incompatible with ECB in PSA, CBC/XTS/NIST_KW/DES,
    # manually set or unset those configurations to check
    # MBEDTLS_BLOCK_CIPHER_NO_DECRYPT with various combinations in aes.o.
    scripts/config.py set MBEDTLS_BLOCK_CIPHER_NO_DECRYPT
    scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
    scripts/config.py unset MBEDTLS_CIPHER_MODE_XTS
    scripts/config.py unset MBEDTLS_DES_C
    scripts/config.py unset MBEDTLS_NIST_KW_C
    build_test_config_combos library/aes.o validate_aes_config_variations \
        "MBEDTLS_AES_SETKEY_ENC_ALT" "MBEDTLS_AES_DECRYPT_ALT" \
        "MBEDTLS_AES_ROM_TABLES" "MBEDTLS_AES_ENCRYPT_ALT" "MBEDTLS_AES_SETKEY_DEC_ALT" \
        "MBEDTLS_AES_FEWER_TABLES" "MBEDTLS_PADLOCK_C" "MBEDTLS_AES_USE_HARDWARE_ONLY" \
        "MBEDTLS_AESNI_C" "MBEDTLS_AESCE_C" "MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH"
}
+
component_test_no_platform () {
    # Full configuration build, without platform support, file IO and net sockets.
    # This should catch missing mbedtls_printf definitions, and by disabling file
    # IO, it should catch missing '#include <stdio.h>'
    msg "build: full config except platform/fsio/net, make, gcc, C99" # ~ 30s
    scripts/config.py full_no_platform
    scripts/config.py unset MBEDTLS_PLATFORM_C
    scripts/config.py unset MBEDTLS_NET_C
    scripts/config.py unset MBEDTLS_FS_IO
    # PSA storage/ITS and NV seed all need file IO, which is disabled here.
    scripts/config.py unset MBEDTLS_PSA_CRYPTO_SE_C
    scripts/config.py unset MBEDTLS_PSA_CRYPTO_STORAGE_C
    scripts/config.py unset MBEDTLS_PSA_ITS_FILE_C
    scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
    # Note, _DEFAULT_SOURCE needs to be defined for platforms using glibc version >2.19,
    # to re-enable platform integration features otherwise disabled in C99 builds
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -std=c99 -pedantic -Os -D_DEFAULT_SOURCE' lib programs
    # The test suites are built without -std=c99 -pedantic; presumably they
    # rely on non-strict-C99 features — NOTE(review): confirm.
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -Os' test
}
+
component_build_no_std_function () {
    # catch compile bugs in _uninit functions
    # Build the full config with MBEDTLS_PLATFORM_NO_STD_FUNCTIONS, so the
    # library must not reference the standard C library entry points directly.
    msg "build: full config with NO_STD_FUNCTION, make, gcc" # ~ 30s
    scripts/config.py full
    scripts/config.py set MBEDTLS_PLATFORM_NO_STD_FUNCTIONS
    scripts/config.py unset MBEDTLS_ENTROPY_NV_SEED
    scripts/config.py unset MBEDTLS_PLATFORM_NV_SEED_ALT
    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Check .
    make
}
+
component_build_no_ssl_srv () {
    # Build-only check that the tree compiles without the SSL server code.
    msg "build: full config except SSL server, make, gcc" # ~ 30s
    scripts/config.py full
    scripts/config.py unset MBEDTLS_SSL_SRV_C
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1'
}
+
component_build_no_ssl_cli () {
    # Build-only check that the tree compiles without the SSL client code.
    msg "build: full config except SSL client, make, gcc" # ~ 30s
    scripts/config.py full
    scripts/config.py unset MBEDTLS_SSL_CLI_C
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1'
}
+
component_build_no_sockets () {
    # Note, C99 compliance can also be tested with the sockets support disabled,
    # as that requires a POSIX platform (which isn't the same as C99).
    msg "build: full config except net_sockets.c, make, gcc -std=c99 -pedantic" # ~ 30s
    scripts/config.py full
    scripts/config.py unset MBEDTLS_NET_C # getaddrinfo() undeclared, etc.
    scripts/config.py set MBEDTLS_NO_PLATFORM_ENTROPY # uses syscall() on GNU/Linux
    # Library only: programs/tests need POSIX features excluded by -pedantic.
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -O1 -std=c99 -pedantic' lib
}
+
component_test_memory_buffer_allocator_backtrace () {
    # Run the main test suites with the static buffer allocator plus its
    # debug/backtrace instrumentation enabled.
    msg "build: default config with memory buffer allocator and backtrace enabled"
    scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
    scripts/config.py set MBEDTLS_PLATFORM_MEMORY
    scripts/config.py set MBEDTLS_MEMORY_BACKTRACE
    scripts/config.py set MBEDTLS_MEMORY_DEBUG
    cmake -DCMAKE_BUILD_TYPE:String=Release .
    make

    msg "test: MBEDTLS_MEMORY_BUFFER_ALLOC_C and MBEDTLS_MEMORY_BACKTRACE"
    make test
}
+
component_test_memory_buffer_allocator () {
    # Run the main test suites and ssl-opt.sh with the static buffer
    # allocator enabled (no backtrace/debug this time).
    msg "build: default config with memory buffer allocator"
    scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
    scripts/config.py set MBEDTLS_PLATFORM_MEMORY
    cmake -DCMAKE_BUILD_TYPE:String=Release .
    make

    msg "test: MBEDTLS_MEMORY_BUFFER_ALLOC_C"
    make test

    msg "test: ssl-opt.sh, MBEDTLS_MEMORY_BUFFER_ALLOC_C"
    # MBEDTLS_MEMORY_BUFFER_ALLOC is slow. Skip tests that tend to time out.
    tests/ssl-opt.sh -e '^DTLS proxy'
}
+
component_test_no_max_fragment_length () {
    # Run max fragment length tests with MFL disabled
    # (the MFL-related ssl-opt cases have dedicated !MFL expectations).
    msg "build: default config except MFL extension (ASan build)" # ~ 30s
    scripts/config.py unset MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: ssl-opt.sh, MFL-related tests"
    tests/ssl-opt.sh -f "Max fragment length"
}
+
component_test_asan_remove_peer_certificate () {
    # Full test run (unit, ssl-opt, compat, context-info) with the peer
    # certificate not kept after the handshake.
    msg "build: default config with MBEDTLS_SSL_KEEP_PEER_CERTIFICATE disabled (ASan build)"
    scripts/config.py unset MBEDTLS_SSL_KEEP_PEER_CERTIFICATE
    # NOTE(review): TLS 1.3 is unset alongside, presumably because it
    # requires keeping the peer certificate — confirm against config rules.
    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
    make test

    msg "test: ssl-opt.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
    tests/ssl-opt.sh

    msg "test: compat.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
    tests/compat.sh

    msg "test: context-info.sh, !MBEDTLS_SSL_KEEP_PEER_CERTIFICATE"
    tests/context-info.sh
}
+
component_test_no_max_fragment_length_small_ssl_out_content_len () {
    # Exercise asymmetric I/O buffers (large incoming, small outgoing)
    # with the MFL extension disabled.
    msg "build: no MFL extension, small SSL_OUT_CONTENT_LEN (ASan build)"
    scripts/config.py unset MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
    scripts/config.py set MBEDTLS_SSL_IN_CONTENT_LEN 16384
    scripts/config.py set MBEDTLS_SSL_OUT_CONTENT_LEN 4096
    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: MFL tests (disabled MFL extension case) & large packet tests"
    tests/ssl-opt.sh -f "Max fragment length\|Large buffer"

    msg "test: context-info.sh (disabled MFL extension case)"
    tests/context-info.sh
}
+
component_test_variable_ssl_in_out_buffer_len () {
    # Full test run with dynamically resizable SSL I/O buffers.
    msg "build: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled (ASan build)"
    scripts/config.py set MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH
    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
    make test

    msg "test: ssl-opt.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
    tests/ssl-opt.sh

    msg "test: compat.sh, MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH enabled"
    tests/compat.sh
}
+
component_test_dtls_cid_legacy () {
    # Full test run with the legacy (pre-RFC 9146) DTLS Connection ID
    # record format selected via MBEDTLS_SSL_DTLS_CONNECTION_ID_COMPAT.
    msg "build: MBEDTLS_SSL_DTLS_CONNECTION_ID (legacy) enabled (ASan build)"
    scripts/config.py set MBEDTLS_SSL_DTLS_CONNECTION_ID_COMPAT 1

    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: MBEDTLS_SSL_DTLS_CONNECTION_ID (legacy)"
    make test

    msg "test: ssl-opt.sh, MBEDTLS_SSL_DTLS_CONNECTION_ID (legacy) enabled"
    tests/ssl-opt.sh

    msg "test: compat.sh, MBEDTLS_SSL_DTLS_CONNECTION_ID (legacy) enabled"
    tests/compat.sh
}
+
component_test_ssl_alloc_buffer_and_mfl () {
    # Combine the static buffer allocator, variable-length SSL buffers and
    # the MFL extension; check handshake memory usage via ssl-opt.sh.
    msg "build: default config with memory buffer allocator and MFL extension"
    scripts/config.py set MBEDTLS_MEMORY_BUFFER_ALLOC_C
    scripts/config.py set MBEDTLS_PLATFORM_MEMORY
    scripts/config.py set MBEDTLS_MEMORY_DEBUG
    scripts/config.py set MBEDTLS_SSL_MAX_FRAGMENT_LENGTH
    scripts/config.py set MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH
    cmake -DCMAKE_BUILD_TYPE:String=Release .
    make

    msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH, MBEDTLS_MEMORY_BUFFER_ALLOC_C, MBEDTLS_MEMORY_DEBUG and MBEDTLS_SSL_MAX_FRAGMENT_LENGTH"
    make test

    msg "test: MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH, MBEDTLS_MEMORY_BUFFER_ALLOC_C, MBEDTLS_MEMORY_DEBUG and MBEDTLS_SSL_MAX_FRAGMENT_LENGTH"
    tests/ssl-opt.sh -f "Handshake memory usage"
}
+
component_test_when_no_ciphersuites_have_mac () {
    # Disable every option that brings in a MAC-using TLS ciphersuite
    # (null cipher, CBC, CMAC), leaving only AEAD suites.
    msg "build: when no ciphersuites have MAC"
    scripts/config.py unset MBEDTLS_CIPHER_NULL_CIPHER
    scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
    scripts/config.py unset MBEDTLS_CMAC_C
    make

    msg "test: !MBEDTLS_SSL_SOME_MODES_USE_MAC"
    make test

    msg "test ssl-opt.sh: !MBEDTLS_SSL_SOME_MODES_USE_MAC"
    tests/ssl-opt.sh -f 'Default\|EtM' -e 'without EtM'
}
+
component_test_no_date_time () {
    # Run the main suites without wall-clock date support (affects X.509
    # validity checking, among others).
    msg "build: default config without MBEDTLS_HAVE_TIME_DATE"
    scripts/config.py unset MBEDTLS_HAVE_TIME_DATE
    cmake -D CMAKE_BUILD_TYPE:String=Check .
    make

    msg "test: !MBEDTLS_HAVE_TIME_DATE - main suites"
    make test
}
+
component_test_platform_calloc_macro () {
    # Route allocation through the *_MACRO hooks (pointing back at the
    # standard calloc/free) to exercise that plumbing.
    msg "build: MBEDTLS_PLATFORM_{CALLOC/FREE}_MACRO enabled (ASan build)"
    scripts/config.py set MBEDTLS_PLATFORM_MEMORY
    scripts/config.py set MBEDTLS_PLATFORM_CALLOC_MACRO calloc
    scripts/config.py set MBEDTLS_PLATFORM_FREE_MACRO free
    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
    make

    msg "test: MBEDTLS_PLATFORM_{CALLOC/FREE}_MACRO enabled (ASan build)"
    make test
}
+
component_test_malloc_0_null () {
    # Full config built with a user config file that makes calloc(0) return
    # NULL, to check the library copes with that allocator behavior.
    msg "build: malloc(0) returns NULL (ASan+UBSan build)"
    scripts/config.py full
    make CC=$ASAN_CC CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"$PWD/tests/configs/user-config-malloc-0-null.h\"' $ASAN_CFLAGS" LDFLAGS="$ASAN_CFLAGS"

    msg "test: malloc(0) returns NULL (ASan+UBSan build)"
    make test

    msg "selftest: malloc(0) returns NULL (ASan+UBSan build)"
    # Just the calloc selftest. "make test" ran the others as part of the
    # test suites.
    programs/test/selftest calloc

    msg "test ssl-opt.sh: malloc(0) returns NULL (ASan+UBSan build)"
    # Run a subset of the tests. The choice is a balance between coverage
    # and time (including time indirectly wasted due to flaky tests).
    # The current choice is to skip tests whose description includes
    # "proxy", which is an approximation of skipping tests that use the
    # UDP proxy, which tend to be slower and flakier.
    tests/ssl-opt.sh -e 'proxy'
}
+
support_test_aesni() {
    # Check that gcc targets x86_64 (we can build AESNI), and check for
    # AESNI support on the host (we can run AESNI).
    #
    # The name of this function is possibly slightly misleading, but needs to align
    # with the name of the corresponding test, component_test_aesni.
    #
    # In principle 32-bit x86 can support AESNI, but our implementation does not
    # support 32-bit x86, so we check for x86-64.
    # We can only grep /proc/cpuinfo on Linux, so this also checks for Linux
    (gcc -v 2>&1 | grep Target | grep -q x86_64) &&
        [[ "$HOSTTYPE" == "x86_64" && "$OSTYPE" == "linux-gnu" ]] &&
        (lscpu | grep -qw aes)
}
+
component_test_aesni () { # ~ 60s
    # This tests the two AESNI implementations (intrinsics and assembly), and also the plain C
    # fallback. It also tests the logic that is used to select which implementation(s) to build.
    #
    # This test does not require the host to have support for AESNI (if it doesn't, the run-time
    # AESNI detection will fallback to the plain C implementation, so the tests will instead
    # exercise the plain C impl).

    msg "build: default config with different AES implementations"
    scripts/config.py set MBEDTLS_AESNI_C
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    scripts/config.py set MBEDTLS_HAVE_ASM

    # test the intrinsics implementation
    msg "AES tests, test intrinsics"
    make clean
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -mpclmul -msse2 -maes'
    # check that we built intrinsics - this should be used by default when supported by the compiler
    ./programs/test/selftest aes | grep "AESNI code" | grep -q "intrinsics"

    # test the asm implementation
    msg "AES tests, test assembly"
    make clean
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -mno-pclmul -mno-sse2 -mno-aes'
    # check that we built assembly - this should be built if the compiler does not support intrinsics
    ./programs/test/selftest aes | grep "AESNI code" | grep -q "assembly"

    # test the plain C implementation
    scripts/config.py unset MBEDTLS_AESNI_C
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    msg "AES tests, plain C"
    make clean
    make CC=gcc CFLAGS='-O2 -Werror'
    # check that there is no AESNI code present
    ./programs/test/selftest aes | not grep -q "AESNI code"
    # Static checks: grep the strings baked into the selftest binary itself.
    not grep -q "AES note: using AESNI" ./programs/test/selftest
    grep -q "AES note: built-in implementation." ./programs/test/selftest

    # test the intrinsics implementation
    scripts/config.py set MBEDTLS_AESNI_C
    scripts/config.py set MBEDTLS_AES_USE_HARDWARE_ONLY
    msg "AES tests, test AESNI only"
    make clean
    make CC=gcc CFLAGS='-Werror -Wall -Wextra -mpclmul -msse2 -maes'
    # Runtime output checks followed by static string checks on the binary.
    ./programs/test/selftest aes | grep -q "AES note: using AESNI"
    ./programs/test/selftest aes | not grep -q "AES note: built-in implementation."
    grep -q "AES note: using AESNI" ./programs/test/selftest
    not grep -q "AES note: built-in implementation." ./programs/test/selftest
}
+
component_test_sha3_variations() {
    # Build and run the SHA-3 test suite with both extremes of the
    # loop-unrolling tuning macros.
    msg "sha3 loop unroll variations"

    # define minimal config sufficient to test SHA3
    # (destructive: replaces mbedtls_config.h entirely)
    cat > include/mbedtls/mbedtls_config.h << END
    #define MBEDTLS_SELF_TEST
    #define MBEDTLS_SHA3_C
END

    msg "all loops unrolled"
    make clean
    make -C tests test_suite_shax CFLAGS="-DMBEDTLS_SHA3_THETA_UNROLL=1 -DMBEDTLS_SHA3_PI_UNROLL=1 -DMBEDTLS_SHA3_CHI_UNROLL=1 -DMBEDTLS_SHA3_RHO_UNROLL=1"
    ./tests/test_suite_shax

    msg "all loops rolled up"
    make clean
    make -C tests test_suite_shax CFLAGS="-DMBEDTLS_SHA3_THETA_UNROLL=0 -DMBEDTLS_SHA3_PI_UNROLL=0 -DMBEDTLS_SHA3_CHI_UNROLL=0 -DMBEDTLS_SHA3_RHO_UNROLL=0"
    ./tests/test_suite_shax
}
+
support_test_aesni_m32() {
    # Gate for component_test_aesni_m32: need a 32-bit-capable toolchain
    # (reuse the m32 check) plus AESNI support on the host CPU.
    support_test_m32_no_asm && (lscpu | grep -qw aes)
}
+
component_test_aesni_m32 () { # ~ 60s
    # This tests are duplicated from component_test_aesni for i386 target
    #
    # AESNI intrinsic code supports i386 and assembly code does not support it.

    msg "build: default config with different AES implementations"
    scripts/config.py set MBEDTLS_AESNI_C
    scripts/config.py set MBEDTLS_PADLOCK_C
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    scripts/config.py set MBEDTLS_HAVE_ASM

    # test the intrinsics implementation with gcc
    msg "AES tests, test intrinsics (gcc)"
    make clean
    make CC=gcc CFLAGS='-m32 -Werror -Wall -Wextra' LDFLAGS='-m32'
    # check that we built intrinsics - this should be used by default when supported by the compiler
    ./programs/test/selftest aes | grep "AESNI code" | grep -q "intrinsics"
    # Static checks: all three engines (AESNI, built-in, Padlock) and the
    # runtime-detection symbol should be present in the 32-bit binary.
    grep -q "AES note: using AESNI" ./programs/test/selftest
    grep -q "AES note: built-in implementation." ./programs/test/selftest
    grep -q "AES note: using VIA Padlock" ./programs/test/selftest
    grep -q mbedtls_aesni_has_support ./programs/test/selftest

    scripts/config.py set MBEDTLS_AESNI_C
    scripts/config.py unset MBEDTLS_PADLOCK_C
    scripts/config.py set MBEDTLS_AES_USE_HARDWARE_ONLY
    msg "AES tests, test AESNI only"
    make clean
    make CC=gcc CFLAGS='-m32 -Werror -Wall -Wextra -mpclmul -msse2 -maes' LDFLAGS='-m32'
    ./programs/test/selftest aes | grep -q "AES note: using AESNI"
    ./programs/test/selftest aes | not grep -q "AES note: built-in implementation."
    # Hardware-only build: no fallback strings or detection symbol expected.
    grep -q "AES note: using AESNI" ./programs/test/selftest
    not grep -q "AES note: built-in implementation." ./programs/test/selftest
    not grep -q "AES note: using VIA Padlock" ./programs/test/selftest
    not grep -q mbedtls_aesni_has_support ./programs/test/selftest
}
+
support_test_aesni_m32_clang() {
    # clang >= 4 is required to build with target attributes
    support_test_aesni_m32 && [[ $(clang_version) -ge 4 ]]
}
+
component_test_aesni_m32_clang() {
    # Clang variant of the 32-bit AESNI intrinsics test in
    # component_test_aesni_m32 (intrinsics path only).

    scripts/config.py set MBEDTLS_AESNI_C
    scripts/config.py set MBEDTLS_PADLOCK_C
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    scripts/config.py set MBEDTLS_HAVE_ASM

    # test the intrinsics implementation with clang
    msg "AES tests, test intrinsics (clang)"
    make clean
    make CC=clang CFLAGS='-m32 -Werror -Wall -Wextra' LDFLAGS='-m32'
    # check that we built intrinsics - this should be used by default when supported by the compiler
    ./programs/test/selftest aes | grep "AESNI code" | grep -q "intrinsics"
    # Static checks on the strings/symbols baked into the binary.
    grep -q "AES note: using AESNI" ./programs/test/selftest
    grep -q "AES note: built-in implementation." ./programs/test/selftest
    grep -q "AES note: using VIA Padlock" ./programs/test/selftest
    grep -q mbedtls_aesni_has_support ./programs/test/selftest
}
+
# For timebeing, no aarch64 gcc available in CI and no arm64 CI node.
component_build_aes_aesce_armcc () {
    # Build-only AESCE checks with Arm Compiler 6, baremetal config.
    msg "Build: AESCE test on arm64 platform without plain C."
    scripts/config.py baremetal

    # armc[56] don't support SHA-512 intrinsics
    scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT

    # Stop armclang warning about feature detection for A64_CRYPTO.
    # With this enabled, the library does build correctly under armclang,
    # but in baremetal builds (as tested here), feature detection is
    # unavailable, and the user is notified via a #warning. So enabling
    # this feature would prevent us from building with -Werror on
    # armclang. Tracked in #7198.
    scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
    scripts/config.py set MBEDTLS_HAVE_ASM

    msg "AESCE, build with default configuration."
    scripts/config.py set MBEDTLS_AESCE_C
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    armc6_build_test "-O1 --target=aarch64-arm-none-eabi -march=armv8-a+crypto"

    msg "AESCE, build AESCE only"
    scripts/config.py set MBEDTLS_AESCE_C
    scripts/config.py set MBEDTLS_AES_USE_HARDWARE_ONLY
    armc6_build_test "-O1 --target=aarch64-arm-none-eabi -march=armv8-a+crypto"
}
+
support_build_aes_armce() {
    # clang >= 11 is required to build with AES extensions
    [[ $(clang_version) -ge 11 ]]
}
+
component_build_aes_armce () {
    # Test variations of AES with Armv8 crypto extensions
    scripts/config.py set MBEDTLS_AESCE_C
    scripts/config.py set MBEDTLS_AES_USE_HARDWARE_ONLY

    msg "MBEDTLS_AES_USE_HARDWARE_ONLY, clang, aarch64"
    make -B library/aesce.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a+crypto"

    msg "MBEDTLS_AES_USE_HARDWARE_ONLY, clang, arm"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm"

    msg "MBEDTLS_AES_USE_HARDWARE_ONLY, clang, thumb"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb"

    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY

    msg "no MBEDTLS_AES_USE_HARDWARE_ONLY, clang, aarch64"
    make -B library/aesce.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a+crypto"

    msg "no MBEDTLS_AES_USE_HARDWARE_ONLY, clang, arm"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm"

    msg "no MBEDTLS_AES_USE_HARDWARE_ONLY, clang, thumb"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb"

    # test for presence of AES instructions
    # With -S, "aesce.o" actually contains assembly text, which the greps
    # below scan for AES mnemonics.
    # NOTE(review): the '.' in the A32/T32 patterns is an unescaped regex
    # dot (matches any char) — possibly intended as a literal '.'; confirm.
    scripts/config.py set MBEDTLS_AES_USE_HARDWARE_ONLY
    msg "clang, test A32 crypto instructions built"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm -S"
    grep -E 'aes[0-9a-z]+.[0-9]\s*[qv]' library/aesce.o
    msg "clang, test T32 crypto instructions built"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb -S"
    grep -E 'aes[0-9a-z]+.[0-9]\s*[qv]' library/aesce.o
    msg "clang, test aarch64 crypto instructions built"
    make -B library/aesce.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a -S"
    grep -E 'aes[a-z]+\s*[qv]' library/aesce.o

    # test for absence of AES instructions
    scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
    scripts/config.py unset MBEDTLS_AESCE_C
    msg "clang, test A32 crypto instructions not built"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm -S"
    not grep -E 'aes[0-9a-z]+.[0-9]\s*[qv]' library/aesce.o
    msg "clang, test T32 crypto instructions not built"
    make -B library/aesce.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb -S"
    not grep -E 'aes[0-9a-z]+.[0-9]\s*[qv]' library/aesce.o
    msg "clang, test aarch64 crypto instructions not built"
    make -B library/aesce.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a -S"
    not grep -E 'aes[a-z]+\s*[qv]' library/aesce.o
}
+
+# Gate for component_build_sha_armce: needs a clang that supports the SHA
+# crypto-extension targets used there.
+support_build_sha_armce() {
+ # clang >= 4 is required to build with SHA extensions
+ [[ $(clang_version) -ge 4 ]]
+}
+
+# Build library/sha256.o for aarch64/A32/T32 under each SHA-256 Armv8-A crypto
+# option (current and deprecated spellings), then check the assembly listings
+# (-S in CFLAGS) for presence/absence of sha256 instructions.
+component_build_sha_armce () {
+ scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+
+
+ # Test variations of SHA256 Armv8 crypto extensions
+ scripts/config.py set MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY
+ msg "MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY clang, aarch64"
+ make -B library/sha256.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a"
+ msg "MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY clang, arm"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm"
+ scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY
+
+
+ # test the deprecated form of the config option
+ scripts/config.py set MBEDTLS_SHA256_USE_A64_CRYPTO_ONLY
+ msg "MBEDTLS_SHA256_USE_A64_CRYPTO_ONLY clang, thumb"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb"
+ scripts/config.py unset MBEDTLS_SHA256_USE_A64_CRYPTO_ONLY
+
+ scripts/config.py set MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+ msg "MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT clang, aarch64"
+ make -B library/sha256.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a"
+ scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+
+
+ # test the deprecated form of the config option
+ scripts/config.py set MBEDTLS_SHA256_USE_A64_CRYPTO_IF_PRESENT
+ msg "MBEDTLS_SHA256_USE_A64_CRYPTO_IF_PRESENT clang, arm"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm -std=c99"
+ msg "MBEDTLS_SHA256_USE_A64_CRYPTO_IF_PRESENT clang, thumb"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb"
+ scripts/config.py unset MBEDTLS_SHA256_USE_A64_CRYPTO_IF_PRESENT
+
+
+ # examine the disassembly for presence of SHA instructions
+ for opt in MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT; do
+ scripts/config.py set ${opt}
+ msg "${opt} clang, test A32 crypto instructions built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm -S"
+ grep -E 'sha256[a-z0-9]+.32\s+[qv]' library/sha256.o
+
+ msg "${opt} clang, test T32 crypto instructions built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb -S"
+ grep -E 'sha256[a-z0-9]+.32\s+[qv]' library/sha256.o
+
+ msg "${opt} clang, test aarch64 crypto instructions built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a -S"
+ grep -E 'sha256[a-z0-9]+\s+[qv]' library/sha256.o
+ scripts/config.py unset ${opt}
+ done
+
+
+ # examine the disassembly for absence of SHA instructions
+ msg "clang, test A32 crypto instructions not built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a72+crypto -marm -S"
+ not grep -E 'sha256[a-z0-9]+.32\s+[qv]' library/sha256.o
+
+ msg "clang, test T32 crypto instructions not built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=arm-linux-gnueabihf -mcpu=cortex-a32+crypto -mthumb -S"
+ not grep -E 'sha256[a-z0-9]+.32\s+[qv]' library/sha256.o
+
+ msg "clang, test aarch64 crypto instructions not built"
+ make -B library/sha256.o CC=clang CFLAGS="--target=aarch64-linux-gnu -march=armv8-a -S"
+ not grep -E 'sha256[a-z0-9]+\s+[qv]' library/sha256.o
+}
+
+# For the time being, no VIA PadLock test platform is available.
+# Build a 32-bit ASan build with PadLock enabled and AESNI disabled, then
+# check that the PadLock runtime-detection symbol made it into the selftest
+# binary (build-time check only; the code is not executed on PadLock hardware).
+component_build_aes_via_padlock () {
+
+ msg "AES:VIA PadLock, build with default configuration."
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py set MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AES_USE_HARDWARE_ONLY
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32" LDFLAGS="-m32 $ASAN_CFLAGS"
+ grep -q mbedtls_padlock_has_support ./programs/test/selftest
+
+}
+
+# Gate for the PadLock build: needs an x86_64 Linux host with the i386
+# foreign architecture enabled (for the -m32 build above).
+support_build_aes_via_padlock_only () {
+ ( [ "$MBEDTLS_TEST_PLATFORM" == "Linux-x86_64" ] || \
+ [ "$MBEDTLS_TEST_PLATFORM" == "Linux-amd64" ] ) && \
+ [ "`dpkg --print-foreign-architectures`" == "i386" ]
+}
+
+# Gate for the AESCE armcc build: same requirement as any armcc build.
+support_build_aes_aesce_armcc () {
+ support_build_armcc
+}
+
+# Build and run the test suites with AES restricted to 128-bit keys,
+# first with the default config, then additionally without CTR_DRBG
+# (which normally requires AES-256).
+component_test_aes_only_128_bit_keys () {
+ msg "build: default config + AES_ONLY_128_BIT_KEY_LENGTH"
+ scripts/config.py set MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ msg "test: default config + AES_ONLY_128_BIT_KEY_LENGTH"
+ make test
+}
+
+component_test_no_ctr_drbg_aes_only_128_bit_keys () {
+ msg "build: default config + AES_ONLY_128_BIT_KEY_LENGTH - CTR_DRBG_C"
+ scripts/config.py set MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH
+ scripts/config.py unset MBEDTLS_CTR_DRBG_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+
+ make CC=clang CFLAGS='-Werror -Wall -Wextra'
+
+ msg "test: default config + AES_ONLY_128_BIT_KEY_LENGTH - CTR_DRBG_C"
+ make test
+}
+
+# As above but with all AES hardware accelerators disabled, so the built-in
+# C implementation is exercised; also runs the selftest binary.
+component_test_aes_only_128_bit_keys_have_builtins () {
+ msg "build: default config + AES_ONLY_128_BIT_KEY_LENGTH - AESNI_C - AESCE_C"
+ scripts/config.py set MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_AESCE_C
+
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ msg "test: default config + AES_ONLY_128_BIT_KEY_LENGTH - AESNI_C - AESCE_C"
+ make test
+
+ msg "selftest: default config + AES_ONLY_128_BIT_KEY_LENGTH - AESNI_C - AESCE_C"
+ programs/test/selftest
+}
+
+# Build and test the software GCM implementation with the large precomputed
+# table enabled; hardware AES (AESNI/AESCE/PadLock) is disabled so the
+# built-in GCM path is the one exercised.
+component_test_gcm_largetable () {
+ msg "build: default config + GCM_LARGE_TABLE - AESNI_C - AESCE_C"
+ scripts/config.py set MBEDTLS_GCM_LARGE_TABLE
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_AESCE_C
+
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ # GCM_LARGE_TABLE was enabled above, so report it as "+" (the build
+ # message already does), not "-".
+ msg "test: default config + GCM_LARGE_TABLE - AESNI_C - AESCE_C"
+ make test
+}
+
+# Exercise the three table-layout variants of the built-in AES
+# implementation: fewer tables, ROM tables, and both together.
+component_test_aes_fewer_tables () {
+ msg "build: default config with AES_FEWER_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_FEWER_TABLES
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ msg "test: AES_FEWER_TABLES"
+ make test
+}
+
+component_test_aes_rom_tables () {
+ msg "build: default config with AES_ROM_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_ROM_TABLES
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ msg "test: AES_ROM_TABLES"
+ make test
+}
+
+component_test_aes_fewer_tables_and_rom_tables () {
+ msg "build: default config with AES_ROM_TABLES and AES_FEWER_TABLES enabled"
+ scripts/config.py set MBEDTLS_AES_FEWER_TABLES
+ scripts/config.py set MBEDTLS_AES_ROM_TABLES
+ make CFLAGS='-O2 -Werror -Wall -Wextra'
+
+ msg "test: AES_FEWER_TABLES + AES_ROM_TABLES"
+ make test
+}
+
+# helper for common_block_cipher_no_decrypt() which:
+# - enable/disable the list of config options passed from -s/-u respectively.
+# - build
+# - test for tests_suite_xxx
+# - selftest
+#
+# Usage: helper_block_cipher_no_decrypt_build_test
+# [-s set_opts] [-u unset_opts] [-c cflags] [-l ldflags] [option [...]]
+# Options: -s set_opts the list of config options to enable
+# -u unset_opts the list of config options to disable
+# -c cflags the list of options passed to CFLAGS
+# -l ldflags the list of options passed to LDFLAGS
+helper_block_cipher_no_decrypt_build_test () {
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ -s)
+ shift; local set_opts="$1";;
+ -u)
+ shift; local unset_opts="$1";;
+ -c)
+ shift; local cflags="-Werror -Wall -Wextra $1";;
+ -l)
+ shift; local ldflags="$1";;
+ esac
+ shift
+ done
+ # Default to empty so the expansions below are safe under set -u.
+ set_opts="${set_opts:-}"
+ unset_opts="${unset_opts:-}"
+ cflags="${cflags:-}"
+ ldflags="${ldflags:-}"
+
+ # NOTE(review): $set_opts/$unset_opts are intentionally unquoted so that
+ # set-all/unset-all receive one config option per word.
+ [ -n "$set_opts" ] && echo "Enabling: $set_opts" && scripts/config.py set-all $set_opts
+ [ -n "$unset_opts" ] && echo "Disabling: $unset_opts" && scripts/config.py unset-all $unset_opts
+
+ msg "build: default config + BLOCK_CIPHER_NO_DECRYPT${set_opts:+ + $set_opts}${unset_opts:+ - $unset_opts} with $cflags${ldflags:+, $ldflags}"
+ make clean
+ make CFLAGS="-O2 $cflags" LDFLAGS="$ldflags"
+
+ # Make sure we don't have mbedtls_xxx_setkey_dec in AES/ARIA/CAMELLIA
+ not grep mbedtls_aes_setkey_dec library/aes.o
+ not grep mbedtls_aria_setkey_dec library/aria.o
+ not grep mbedtls_camellia_setkey_dec library/camellia.o
+ # Make sure we don't have mbedtls_internal_aes_decrypt in AES
+ not grep mbedtls_internal_aes_decrypt library/aes.o
+ # Make sure we don't have mbedtls_aesni_inverse_key in AESNI
+ not grep mbedtls_aesni_inverse_key library/aesni.o
+
+ msg "test: default config + BLOCK_CIPHER_NO_DECRYPT${set_opts:+ + $set_opts}${unset_opts:+ - $unset_opts} with $cflags${ldflags:+, $ldflags}"
+ make test
+
+ msg "selftest: default config + BLOCK_CIPHER_NO_DECRYPT${set_opts:+ + $set_opts}${unset_opts:+ - $unset_opts} with $cflags${ldflags:+, $ldflags}"
+ programs/test/selftest
+}
+
+# This is a common configuration function used in:
+# - component_test_block_cipher_no_decrypt_aesni_legacy()
+# - component_test_block_cipher_no_decrypt_aesni_use_psa()
+# in order to test BLOCK_CIPHER_NO_DECRYPT with AESNI intrinsics,
+# AESNI assembly and AES C implementation on x86_64 and with AESNI intrinsics
+# on x86.
+common_block_cipher_no_decrypt () {
+ # test AESNI intrinsics
+ helper_block_cipher_no_decrypt_build_test \
+ -s "MBEDTLS_AESNI_C" \
+ -c "-mpclmul -msse2 -maes"
+
+ # test AESNI assembly (intrinsics disabled by the -mno-* flags)
+ helper_block_cipher_no_decrypt_build_test \
+ -s "MBEDTLS_AESNI_C" \
+ -c "-mno-pclmul -mno-sse2 -mno-aes"
+
+ # test AES C implementation
+ helper_block_cipher_no_decrypt_build_test \
+ -u "MBEDTLS_AESNI_C"
+
+ # test AESNI intrinsics for i386 target
+ helper_block_cipher_no_decrypt_build_test \
+ -s "MBEDTLS_AESNI_C" \
+ -c "-m32 -mpclmul -msse2 -maes" \
+ -l "-m32"
+}
+
+# This is a configuration function used in component_test_block_cipher_no_decrypt_xxx:
+# usage: 0: no PSA crypto configuration
+# 1: use PSA crypto configuration
+config_block_cipher_no_decrypt () {
+ use_psa=$1
+
+ scripts/config.py set MBEDTLS_BLOCK_CIPHER_NO_DECRYPT
+ # Disable modes/ciphers that need the decrypt direction.
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_CBC
+ scripts/config.py unset MBEDTLS_CIPHER_MODE_XTS
+ scripts/config.py unset MBEDTLS_DES_C
+ scripts/config.py unset MBEDTLS_NIST_KW_C
+
+ if [ "$use_psa" -eq 1 ]; then
+ # Enable support for cryptographic mechanisms through the PSA API.
+ # Note: XTS, KW are not yet supported via the PSA API in Mbed TLS.
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_NO_PADDING
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_CBC_PKCS7
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_ALG_ECB_NO_PADDING
+ scripts/config.py -f "$CRYPTO_CONFIG_H" unset PSA_WANT_KEY_TYPE_DES
+ fi
+}
+
+# BLOCK_CIPHER_NO_DECRYPT with AESNI, legacy (non-PSA) configuration.
+component_test_block_cipher_no_decrypt_aesni () {
+ # This consistently causes an llvm crash on clang 3.8, so use gcc
+ export CC=gcc
+ config_block_cipher_no_decrypt 0
+ common_block_cipher_no_decrypt
+}
+
+# BLOCK_CIPHER_NO_DECRYPT with AESNI, PSA crypto configuration.
+component_test_block_cipher_no_decrypt_aesni_use_psa () {
+ # This consistently causes an llvm crash on clang 3.8, so use gcc
+ export CC=gcc
+ config_block_cipher_no_decrypt 1
+ common_block_cipher_no_decrypt
+}
+
+support_test_block_cipher_no_decrypt_aesce_armcc () {
+ support_build_armcc
+}
+
+# Baremetal armclang build of BLOCK_CIPHER_NO_DECRYPT with AESCE, followed by
+# symbol checks that the decrypt-direction code was compiled out.
+component_test_block_cipher_no_decrypt_aesce_armcc () {
+ scripts/config.py baremetal
+
+ # armc[56] don't support SHA-512 intrinsics
+ scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT
+
+ # Stop armclang warning about feature detection for A64_CRYPTO.
+ # With this enabled, the library does build correctly under armclang,
+ # but in baremetal builds (as tested here), feature detection is
+ # unavailable, and the user is notified via a #warning. So enabling
+ # this feature would prevent us from building with -Werror on
+ # armclang. Tracked in #7198.
+ scripts/config.py unset MBEDTLS_SHA256_USE_A64_CRYPTO_IF_PRESENT
+ scripts/config.py set MBEDTLS_HAVE_ASM
+
+ config_block_cipher_no_decrypt 1
+
+ # test AESCE baremetal build
+ scripts/config.py set MBEDTLS_AESCE_C
+ msg "build: default config + BLOCK_CIPHER_NO_DECRYPT with AESCE"
+ armc6_build_test "-O1 --target=aarch64-arm-none-eabi -march=armv8-a+crypto -Werror -Wall -Wextra"
+
+ # Make sure we don't have mbedtls_xxx_setkey_dec in AES/ARIA/CAMELLIA
+ not grep mbedtls_aes_setkey_dec library/aes.o
+ not grep mbedtls_aria_setkey_dec library/aria.o
+ not grep mbedtls_camellia_setkey_dec library/camellia.o
+ # Make sure we don't have mbedtls_internal_aes_decrypt in AES
+ not grep mbedtls_internal_aes_decrypt library/aes.o
+ # Make sure we don't have mbedtls_aesce_inverse_key and aesce_decrypt_block in AESCE
+ not grep mbedtls_aesce_inverse_key library/aesce.o
+ not grep aesce_decrypt_block library/aesce.o
+}
+
+# ASan builds of the full config exercising the CTR_DRBG / entropy-hash
+# combinations: AES-256+SHA-256, AES-128+SHA-512 (default entropy hash),
+# and AES-128+SHA-256.
+component_test_ctr_drbg_aes_256_sha_256 () {
+ msg "build: full + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_ENTROPY_FORCE_SHA256
+ CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ make test
+}
+
+component_test_ctr_drbg_aes_128_sha_512 () {
+ msg "build: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_CTR_DRBG_USE_128_BIT_KEY
+ CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY (ASan build)"
+ make test
+}
+
+component_test_ctr_drbg_aes_128_sha_256 () {
+ msg "build: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BUFFER_ALLOC_C
+ scripts/config.py set MBEDTLS_CTR_DRBG_USE_128_BIT_KEY
+ scripts/config.py set MBEDTLS_ENTROPY_FORCE_SHA256
+ CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: full + MBEDTLS_CTR_DRBG_USE_128_BIT_KEY + MBEDTLS_ENTROPY_FORCE_SHA256 (ASan build)"
+ make test
+}
+
+# Secure-element driver interface (MBEDTLS_PSA_CRYPTO_SE_C) on top of the
+# default config, built with clang + ASan.
+component_test_se_default () {
+ msg "build: default config + MBEDTLS_PSA_CRYPTO_SE_C"
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_SE_C
+ make CC=clang CFLAGS="$ASAN_CFLAGS -Os" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: default config + MBEDTLS_PSA_CRYPTO_SE_C"
+ make test
+}
+
+# Full config with the PSA test drivers enabled (dispatching back to the
+# built-in implementations), via the test user-config file.
+component_test_psa_crypto_drivers () {
+ msg "build: full + test drivers dispatching to builtins"
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_PSA_CRYPTO_CONFIG
+ loc_cflags="$ASAN_CFLAGS -DPSA_CRYPTO_DRIVER_TEST_ALL"
+ loc_cflags="${loc_cflags} '-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/user-config-for-test.h\"'"
+ loc_cflags="${loc_cflags} -I../tests/include -O2"
+
+ make CC=$ASAN_CC CFLAGS="${loc_cflags}" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: full + test drivers dispatching to builtins"
+ make test
+}
+
+# Shared-library builds (make and cmake): verify programs actually link
+# against libmbedcrypto dynamically (ldd) and that dlopen works.
+component_test_make_shared () {
+ msg "build/test: make shared" # ~ 40s
+ make SHARED=1 all check
+ ldd programs/util/strerror | grep libmbedcrypto
+ programs/test/dlopen_demo.sh
+}
+
+component_test_cmake_shared () {
+ msg "build/test: cmake shared" # ~ 2min
+ cmake -DUSE_SHARED_MBEDTLS_LIBRARY=On .
+ make
+ ldd programs/util/strerror | grep libmbedcrypto
+ make test
+ programs/test/dlopen_demo.sh
+}
+
+# Build and unit-test with a given compiler at several optimization levels.
+# Arguments: $1 - human-readable config description (for msg)
+#            $2 - compiler to use as CC
+#            $3... - one optimization flag per build/test cycle
+test_build_opt () {
+ info=$1 cc=$2; shift 2
+ $cc --version
+ for opt in "$@"; do
+ msg "build/test: $cc $opt, $info" # ~ 30s
+ make CC="$cc" CFLAGS="$opt -std=c99 -pedantic -Wall -Wextra -Werror"
+ # We're confident enough in compilers to not run _all_ the tests,
+ # but at least run the unit tests. In particular, runs with
+ # optimizations use inline assembly whereas runs with -O0
+ # skip inline assembly.
+ make test # ~30s
+ make clean
+ done
+}
+
+# On non-Linux platforms (e.g. FreeBSD) components are invoked by name, so
+# define the generic component_test_clang_opt only there; on Linux the
+# versioned clang components below cover this instead.
+if [[ $(uname) != "Linux" ]]; then
+ component_test_clang_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' clang -O0 -Os -O2
+ }
+fi
+
+# Full-config builds with the newest/oldest supported clang and gcc
+# (compiler names come from the CLANG_*/GCC_* environment variables);
+# each component is gated on that compiler being installed.
+component_test_clang_latest_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' "$CLANG_LATEST" -O0 -Os -O2
+}
+support_test_clang_latest_opt () {
+ type "$CLANG_LATEST" >/dev/null 2>/dev/null
+}
+
+component_test_clang_earliest_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' "$CLANG_EARLIEST" -O0
+}
+support_test_clang_earliest_opt () {
+ type "$CLANG_EARLIEST" >/dev/null 2>/dev/null
+}
+
+component_test_gcc_latest_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' "$GCC_LATEST" -O0 -Os -O2
+}
+support_test_gcc_latest_opt () {
+ type "$GCC_LATEST" >/dev/null 2>/dev/null
+}
+
+component_test_gcc_earliest_opt () {
+ scripts/config.py full
+ test_build_opt 'full config' "$GCC_EARLIEST" -O0
+}
+support_test_gcc_earliest_opt () {
+ type "$GCC_EARLIEST" >/dev/null 2>/dev/null
+}
+
+# Verify MBEDTLS_CONFIG_FILE and MBEDTLS_USER_CONFIG_FILE are honoured:
+# the real config header is replaced with an #error, so a successful build
+# proves the alternative file was used.
+component_build_mbedtls_config_file () {
+ msg "build: make with MBEDTLS_CONFIG_FILE" # ~40s
+ scripts/config.py -w full_config.h full
+ echo '#error "MBEDTLS_CONFIG_FILE is not working"' >"$CONFIG_H"
+ make CFLAGS="-I '$PWD' -DMBEDTLS_CONFIG_FILE='\"full_config.h\"'"
+ # Make sure this feature is enabled. We'll disable it in the next phase.
+ programs/test/query_compile_time_config MBEDTLS_NIST_KW_C
+ make clean
+
+ msg "build: make with MBEDTLS_CONFIG_FILE + MBEDTLS_USER_CONFIG_FILE"
+ # In the user config, disable one feature (for simplicity, pick a feature
+ # that nothing else depends on).
+ echo '#undef MBEDTLS_NIST_KW_C' >user_config.h
+ make CFLAGS="-I '$PWD' -DMBEDTLS_CONFIG_FILE='\"full_config.h\"' -DMBEDTLS_USER_CONFIG_FILE='\"user_config.h\"'"
+ not programs/test/query_compile_time_config MBEDTLS_NIST_KW_C
+
+ rm -f user_config.h full_config.h
+}
+
+# Same idea as component_build_mbedtls_config_file, but for the PSA crypto
+# config file and its user-config companion.
+component_build_psa_config_file () {
+ msg "build: make with MBEDTLS_PSA_CRYPTO_CONFIG_FILE" # ~40s
+ scripts/config.py set MBEDTLS_PSA_CRYPTO_CONFIG
+ cp "$CRYPTO_CONFIG_H" psa_test_config.h
+ echo '#error "MBEDTLS_PSA_CRYPTO_CONFIG_FILE is not working"' >"$CRYPTO_CONFIG_H"
+ make CFLAGS="-I '$PWD' -DMBEDTLS_PSA_CRYPTO_CONFIG_FILE='\"psa_test_config.h\"'"
+ # Make sure this feature is enabled. We'll disable it in the next phase.
+ programs/test/query_compile_time_config MBEDTLS_CMAC_C
+ make clean
+
+ msg "build: make with MBEDTLS_PSA_CRYPTO_CONFIG_FILE + MBEDTLS_PSA_CRYPTO_USER_CONFIG_FILE" # ~40s
+ # In the user config, disable one feature, which will reflect on the
+ # mbedtls configuration so we can query it with query_compile_time_config.
+ echo '#undef PSA_WANT_ALG_CMAC' >psa_user_config.h
+ scripts/config.py unset MBEDTLS_CMAC_C
+ make CFLAGS="-I '$PWD' -DMBEDTLS_PSA_CRYPTO_CONFIG_FILE='\"psa_test_config.h\"' -DMBEDTLS_PSA_CRYPTO_USER_CONFIG_FILE='\"psa_user_config.h\"'"
+ not programs/test/query_compile_time_config MBEDTLS_CMAC_C
+
+ rm -f psa_test_config.h psa_user_config.h
+}
+
+component_build_psa_alt_headers () {
+ msg "build: make with PSA alt headers" # ~20s
+
+ # Generate alternative versions of the substitutable headers with the
+ # same content except different include guards.
+ make -C tests include/alt-extra/psa/crypto_platform_alt.h include/alt-extra/psa/crypto_struct_alt.h
+
+ # Build the library and some programs.
+ # Don't build the fuzzers to avoid having to go through hoops to set
+ # a correct include path for programs/fuzz/Makefile.
+ make CFLAGS="-I ../tests/include/alt-extra -DMBEDTLS_PSA_CRYPTO_PLATFORM_FILE='\"psa/crypto_platform_alt.h\"' -DMBEDTLS_PSA_CRYPTO_STRUCT_FILE='\"psa/crypto_struct_alt.h\"'" lib
+ make -C programs -o fuzz CFLAGS="-I ../tests/include/alt-extra -DMBEDTLS_PSA_CRYPTO_PLATFORM_FILE='\"psa/crypto_platform_alt.h\"' -DMBEDTLS_PSA_CRYPTO_STRUCT_FILE='\"psa/crypto_struct_alt.h\"'"
+
+ # Check that we're getting the alternative include guards and not the
+ # original include guards.
+ programs/test/query_included_headers | grep -x PSA_CRYPTO_PLATFORM_ALT_H
+ programs/test/query_included_headers | grep -x PSA_CRYPTO_STRUCT_ALT_H
+ programs/test/query_included_headers | not grep -x PSA_CRYPTO_PLATFORM_H
+ programs/test/query_included_headers | not grep -x PSA_CRYPTO_STRUCT_H
+}
+
+component_test_m32_no_asm () {
+ # Build without assembly, so as to use portable C code (in a 32-bit
+ # build) and not the i386-specific inline assembly.
+ #
+ # Note that we require gcc, because clang Asan builds fail to link for
+ # this target (cannot find libclang_rt.lsan-i386.a - this is a known clang issue).
+ msg "build: i386, make, gcc, no asm (ASan build)" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESNI_C # AESNI for 32-bit is tested in test_aesni_m32
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, make, gcc, no asm (ASan build)"
+ make test
+}
+# -m32 cross-builds need an x86_64/amd64 host.
+support_test_m32_no_asm () {
+ case $(uname -m) in
+ amd64|x86_64) true;;
+ *) false;;
+ esac
+}
+
+# 32-bit (i386) builds: optimized build with inline assembly, and the
+# Everest ECDH context variant. Both reuse the x86_64-host gate above.
+component_test_m32_o2 () {
+ # Build with optimization, to use the i386 specific inline assembly
+ # and go faster for tests.
+ msg "build: i386, make, gcc -O2 (ASan build)" # ~ 30s
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_AESNI_C # AESNI for 32-bit is tested in test_aesni_m32
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, make, gcc -O2 (ASan build)"
+ make test
+
+ msg "test ssl-opt.sh, i386, make, gcc-O2"
+ tests/ssl-opt.sh
+}
+support_test_m32_o2 () {
+ support_test_m32_no_asm "$@"
+}
+
+component_test_m32_everest () {
+ msg "build: i386, Everest ECDH context (ASan build)" # ~ 6 min
+ scripts/config.py set MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED
+ scripts/config.py unset MBEDTLS_AESNI_C # AESNI for 32-bit is tested in test_aesni_m32
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -m32" LDFLAGS="-m32 $ASAN_CFLAGS"
+
+ msg "test: i386, Everest ECDH context - main suites (inc. selftests) (ASan build)" # ~ 50s
+ make test
+
+ msg "test: i386, Everest ECDH context - ECDH-related part of ssl-opt.sh (ASan build)" # ~ 5s
+ tests/ssl-opt.sh -f ECDH
+
+ msg "test: i386, Everest ECDH context - compat.sh with some ECDH ciphersuites (ASan build)" # ~ 3 min
+ # Exclude some symmetric ciphers that are redundant here to gain time.
+ tests/compat.sh -f ECDH -V NO -e 'ARIA\|CAMELLIA\|CHACHA'
+}
+support_test_m32_everest () {
+ support_test_m32_no_asm "$@"
+}
+
+# x32 ABI build (64-bit instructions, 32-bit pointers); needs an x86_64 host.
+component_test_mx32 () {
+ msg "build: 64-bit ILP32, make, gcc" # ~ 30s
+ scripts/config.py full
+ make CC=gcc CFLAGS='-O2 -Werror -Wall -Wextra -mx32' LDFLAGS='-mx32'
+
+ msg "test: 64-bit ILP32, make, gcc"
+ make test
+}
+support_test_mx32 () {
+ case $(uname -m) in
+ amd64|x86_64) true;;
+ *) false;;
+ esac
+}
+
+# Bignum configuration variants: minimal MPI window size, and forced 32/64-bit
+# limbs with all hardware acceleration and assembly disabled so the forced
+# limb size actually takes effect.
+component_test_min_mpi_window_size () {
+ msg "build: Default + MBEDTLS_MPI_WINDOW_SIZE=1 (ASan build)" # ~ 10s
+ scripts/config.py set MBEDTLS_MPI_WINDOW_SIZE 1
+ CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+ make
+
+ msg "test: MBEDTLS_MPI_WINDOW_SIZE=1 - main suites (inc. selftests) (ASan build)" # ~ 10s
+ make test
+}
+
+component_test_have_int32 () {
+ msg "build: gcc, force 32-bit bignum limbs"
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESCE_C
+ make CC=gcc CFLAGS='-O2 -Werror -Wall -Wextra -DMBEDTLS_HAVE_INT32'
+
+ msg "test: gcc, force 32-bit bignum limbs"
+ make test
+}
+
+component_test_have_int64 () {
+ msg "build: gcc, force 64-bit bignum limbs"
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESCE_C
+ make CC=gcc CFLAGS='-O2 -Werror -Wall -Wextra -DMBEDTLS_HAVE_INT64'
+
+ msg "test: gcc, force 64-bit bignum limbs"
+ make test
+}
+
+component_test_have_int32_cmake_new_bignum () {
+ msg "build: gcc, force 32-bit bignum limbs, new bignum interface, test hooks (ASan build)"
+ scripts/config.py unset MBEDTLS_HAVE_ASM
+ scripts/config.py unset MBEDTLS_AESNI_C
+ scripts/config.py unset MBEDTLS_PADLOCK_C
+ scripts/config.py unset MBEDTLS_AESCE_C
+ scripts/config.py set MBEDTLS_TEST_HOOKS
+ scripts/config.py set MBEDTLS_ECP_WITH_MPI_UINT
+ make CC=gcc CFLAGS="$ASAN_CFLAGS -Werror -Wall -Wextra -DMBEDTLS_HAVE_INT32" LDFLAGS="$ASAN_CFLAGS"
+
+ msg "test: gcc, force 32-bit bignum limbs, new bignum interface, test hooks (ASan build)"
+ make test
+}
+
+# Full config with the portable fallbacks for double-width division and
+# 64-bit multiplication, built and tested natively.
+component_test_no_udbl_division () {
+ msg "build: MBEDTLS_NO_UDBL_DIVISION native" # ~ 10s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_NO_UDBL_DIVISION
+ make CFLAGS='-Werror -O1'
+
+ msg "test: MBEDTLS_NO_UDBL_DIVISION native" # ~ 10s
+ make test
+}
+
+component_test_no_64bit_multiplication () {
+ msg "build: MBEDTLS_NO_64BIT_MULTIPLICATION native" # ~ 10s
+ scripts/config.py full
+ scripts/config.py set MBEDTLS_NO_64BIT_MULTIPLICATION
+ make CFLAGS='-Werror -O1'
+
+ msg "test: MBEDTLS_NO_64BIT_MULTIPLICATION native" # ~ 10s
+ make test
+}
+
+component_test_no_strings () {
+ msg "build: no strings" # ~10s
+ scripts/config.py full
+ # Disable options that activate a large amount of string constants.
+ scripts/config.py unset MBEDTLS_DEBUG_C
+ scripts/config.py unset MBEDTLS_ERROR_C
+ scripts/config.py set MBEDTLS_ERROR_STRERROR_DUMMY
+ scripts/config.py unset MBEDTLS_VERSION_FEATURES
+ make CFLAGS='-Werror -Os'
+
+ msg "test: no strings" # ~ 10s
+ make test
+}
+
+# Full config with X.509 info strings removed; builds, runs unit tests and
+# ssl-opt.sh.
+component_test_no_x509_info () {
+ msg "build: full + MBEDTLS_X509_REMOVE_INFO" # ~ 10s
+ # Use scripts/config.py like every other component (config.pl is the
+ # legacy wrapper name).
+ scripts/config.py full
+ scripts/config.py unset MBEDTLS_MEMORY_BACKTRACE # too slow for tests
+ scripts/config.py set MBEDTLS_X509_REMOVE_INFO
+ make CFLAGS='-Werror -O2'
+
+ msg "test: full + MBEDTLS_X509_REMOVE_INFO" # ~ 10s
+ make test
+
+ msg "test: ssl-opt.sh, full + MBEDTLS_X509_REMOVE_INFO" # ~ 1 min
+ tests/ssl-opt.sh
+}
+
+# Baremetal cross-builds with arm-none-eabi-gcc / arm-linux-gnueabi-gcc;
+# library only for none-eabi, full tree for the gnueabi armv5te build.
+component_build_arm_none_eabi_gcc () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -O1, baremetal+debug" # ~ 10s
+ scripts/config.py baremetal
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra -O1' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -O1, baremetal+debug"
+ ${ARM_NONE_EABI_GCC_PREFIX}size -t library/*.o
+}
+
+component_build_arm_linux_gnueabi_gcc_arm5vte () {
+ msg "build: ${ARM_LINUX_GNUEABI_GCC_PREFIX}gcc -march=arm5vte, baremetal+debug" # ~ 10s
+ scripts/config.py baremetal
+ # Build for a target platform that's close to what Debian uses
+ # for its "armel" distribution (https://wiki.debian.org/ArmEabiPort).
+ # See https://github.com/Mbed-TLS/mbedtls/pull/2169 and comments.
+ # Build everything including programs, see for example
+ # https://github.com/Mbed-TLS/mbedtls/pull/3449#issuecomment-675313720
+ make CC="${ARM_LINUX_GNUEABI_GCC_PREFIX}gcc" AR="${ARM_LINUX_GNUEABI_GCC_PREFIX}ar" CFLAGS='-Werror -Wall -Wextra -march=armv5te -O1' LDFLAGS='-march=armv5te'
+
+ msg "size: ${ARM_LINUX_GNUEABI_GCC_PREFIX}gcc -march=armv5te -O1, baremetal+debug"
+ ${ARM_LINUX_GNUEABI_GCC_PREFIX}size -t library/*.o
+}
+support_build_arm_linux_gnueabi_gcc_arm5vte () {
+ type ${ARM_LINUX_GNUEABI_GCC_PREFIX}gcc >/dev/null 2>&1
+}
+
+component_build_arm_none_eabi_gcc_arm5vte () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -march=arm5vte, baremetal+debug" # ~ 10s
+ scripts/config.py baremetal
+ # This is an imperfect substitute for
+ # component_build_arm_linux_gnueabi_gcc_arm5vte
+ # in case the gcc-arm-linux-gnueabi toolchain is not available
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" CFLAGS='-std=c99 -Werror -Wall -Wextra -march=armv5te -O1' LDFLAGS='-march=armv5te' SHELL='sh -x' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -march=armv5te -O1, baremetal+debug"
+ ${ARM_NONE_EABI_GCC_PREFIX}size -t library/*.o
+}
+
+# Size-focused baremetal build for Cortex-M0+; reports per-object and
+# per-archive totals.
+component_build_arm_none_eabi_gcc_m0plus () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -mthumb -mcpu=cortex-m0plus, baremetal_size" # ~ 10s
+ scripts/config.py baremetal_size
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra -mthumb -mcpu=cortex-m0plus -Os' lib
+
+ msg "size: ${ARM_NONE_EABI_GCC_PREFIX}gcc -mthumb -mcpu=cortex-m0plus -Os, baremetal_size"
+ ${ARM_NONE_EABI_GCC_PREFIX}size -t library/*.o
+ for lib in library/*.a; do
+ echo "$lib:"
+ ${ARM_NONE_EABI_GCC_PREFIX}size -t $lib | grep TOTALS
+ done
+}
+
+# Baremetal builds checking that the NO_UDBL_DIVISION / NO_64BIT_MULTIPLICATION
+# options really avoid pulling in the corresponding libgcc helper routines.
+component_build_arm_none_eabi_gcc_no_udbl_division () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc -DMBEDTLS_NO_UDBL_DIVISION, make" # ~ 10s
+ scripts/config.py baremetal
+ scripts/config.py set MBEDTLS_NO_UDBL_DIVISION
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -Wall -Wextra' lib
+ echo "Checking that software 64-bit division is not required"
+ not grep __aeabi_uldiv library/*.o
+}
+
+component_build_arm_none_eabi_gcc_no_64bit_multiplication () {
+ msg "build: ${ARM_NONE_EABI_GCC_PREFIX}gcc MBEDTLS_NO_64BIT_MULTIPLICATION, make" # ~ 10s
+ scripts/config.py baremetal
+ scripts/config.py set MBEDTLS_NO_64BIT_MULTIPLICATION
+ make CC="${ARM_NONE_EABI_GCC_PREFIX}gcc" AR="${ARM_NONE_EABI_GCC_PREFIX}ar" LD="${ARM_NONE_EABI_GCC_PREFIX}ld" CFLAGS='-std=c99 -Werror -O1 -march=armv6-m -mthumb' lib
+ echo "Checking that software 64-bit multiplication is not required"
+ not grep __aeabi_lmul library/*.o
+}
+
+# Baremetal clang cross-builds for Thumb 2 and Thumb 1 targets.
+component_build_arm_clang_thumb () {
+ # ~ 30s
+
+ scripts/config.py baremetal
+
+ msg "build: clang thumb 2, make"
+ make clean
+ make CC="clang" CFLAGS='-std=c99 -Werror -Os --target=arm-linux-gnueabihf -march=armv7-m -mthumb' lib
+
+ # Some Thumb 1 asm is sensitive to optimisation level, so test both -O0 and -Os
+ msg "build: clang thumb 1 -O0, make"
+ make clean
+ make CC="clang" CFLAGS='-std=c99 -Werror -O0 --target=arm-linux-gnueabihf -mcpu=arm1136j-s -mthumb' lib
+
+ msg "build: clang thumb 1 -Os, make"
+ make clean
+ make CC="clang" CFLAGS='-std=c99 -Werror -Os --target=arm-linux-gnueabihf -mcpu=arm1136j-s -mthumb' lib
+}
+
+component_build_armcc () {
+    # Build the library (baremetal config) with ARM Compiler 5, then with
+    # ARM Compiler 6 for a matrix of Arm architecture targets.
+    # armc6_build_test is a helper defined elsewhere in this script.
+    msg "build: ARM Compiler 5"
+    scripts/config.py baremetal
+    # armc[56] don't support SHA-512 intrinsics
+    scripts/config.py unset MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT
+
+    # older versions of armcc/armclang don't support AESCE_C on 32-bit Arm
+    scripts/config.py unset MBEDTLS_AESCE_C
+
+    # Stop armclang warning about feature detection for A64_CRYPTO.
+    # With this enabled, the library does build correctly under armclang,
+    # but in baremetal builds (as tested here), feature detection is
+    # unavailable, and the user is notified via a #warning. So enabling
+    # this feature would prevent us from building with -Werror on
+    # armclang. Tracked in #7198.
+    scripts/config.py unset MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT
+
+    scripts/config.py set MBEDTLS_HAVE_ASM
+
+    make CC="$ARMC5_CC" AR="$ARMC5_AR" WARNING_CFLAGS='--strict --c99' lib
+
+    # Report object sizes for the armcc build.
+    msg "size: ARM Compiler 5"
+    "$ARMC5_FROMELF" -z library/*.o
+
+    # Compile mostly with -O1 since some Arm inline assembly is disabled for -O0.
+
+    # ARM Compiler 6 - Target ARMv7-A
+    armc6_build_test "-O1 --target=arm-arm-none-eabi -march=armv7-a"
+
+    # ARM Compiler 6 - Target ARMv7-M
+    armc6_build_test "-O1 --target=arm-arm-none-eabi -march=armv7-m"
+
+    # ARM Compiler 6 - Target ARMv7-M+DSP
+    armc6_build_test "-O1 --target=arm-arm-none-eabi -march=armv7-m+dsp"
+
+    # ARM Compiler 6 - Target ARMv8-A - AArch32
+    armc6_build_test "-O1 --target=arm-arm-none-eabi -march=armv8.2-a"
+
+    # ARM Compiler 6 - Target ARMv8-M
+    armc6_build_test "-O1 --target=arm-arm-none-eabi -march=armv8-m.main"
+
+    # ARM Compiler 6 - Target Cortex-M0 - no optimisation
+    armc6_build_test "-O0 --target=arm-arm-none-eabi -mcpu=cortex-m0"
+
+    # ARM Compiler 6 - Target Cortex-M0
+    armc6_build_test "-Os --target=arm-arm-none-eabi -mcpu=cortex-m0"
+
+    # ARM Compiler 6 - Target ARMv8.2-A - AArch64
+    #
+    # Re-enable MBEDTLS_AESCE_C as this should be supported by the version of armclang
+    # that we have in our CI
+    scripts/config.py set MBEDTLS_AESCE_C
+    armc6_build_test "-O1 --target=aarch64-arm-none-eabi -march=armv8.2-a+crypto"
+}
+
+support_build_armcc () {
+    # Gate component_build_armcc on both ARM Compiler 5 and 6 being present.
+    armc5_cc="$ARMC5_BIN_DIR/armcc"
+    armc6_cc="$ARMC6_BIN_DIR/armclang"
+    # check_tools (defined elsewhere) may terminate on failure, so run it
+    # in a subshell and use only its exit status.
+    (check_tools "$armc5_cc" "$armc6_cc" > /dev/null 2>&1)
+}
+
+component_test_tls12_only () {
+    # ASan build and full test run with TLS 1.3 support compiled out.
+    msg "build: default config without MBEDTLS_SSL_PROTO_TLS1_3, cmake, gcc, ASan"
+    scripts/config.py unset MBEDTLS_SSL_PROTO_TLS1_3
+    CC=gcc cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+
+    msg "test: main suites (inc. selftests) (ASan build)"
+    make test
+
+    msg "test: ssl-opt.sh (ASan build)"
+    tests/ssl-opt.sh
+
+    msg "test: compat.sh (ASan build)"
+    tests/compat.sh
+}
+
+component_test_tls13_only () {
+    # TLS 1.3-only build (TLS 1.2 disabled via the tls13-only.h user config)
+    # with all TLS 1.3 key exchange modes enabled.
+    msg "build: default config without MBEDTLS_SSL_PROTO_TLS1_2"
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    scripts/config.py set MBEDTLS_SSL_RECORD_SIZE_LIMIT
+    # Nested quoting so that make passes -DMBEDTLS_USER_CONFIG_FILE="..."
+    # (including the inner double quotes) through to the compiler.
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test: TLS 1.3 only, all key exchange modes enabled"
+    make test
+
+    msg "ssl-opt.sh: TLS 1.3 only, all key exchange modes enabled"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_psk () {
+    # TLS 1.3-only build restricted to the pure-PSK key exchange mode;
+    # certificate/signature support is disabled since PSK needs none of it.
+    msg "build: TLS 1.3 only from default, only PSK key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_ECDH_C
+    scripts/config.py unset MBEDTLS_DHM_C
+    scripts/config.py unset MBEDTLS_X509_CRT_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    scripts/config.py unset MBEDTLS_SSL_SERVER_NAME_INDICATION
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_PKCS1_V21
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, only PSK key exchange mode enabled"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, only PSK key exchange mode enabled"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_ephemeral () {
+    # TLS 1.3-only build restricted to the (certificate-based) ephemeral
+    # key exchange mode; both PSK modes and early data are disabled.
+    msg "build: TLS 1.3 only from default, only ephemeral key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_EARLY_DATA
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, only ephemeral key exchange mode"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, only ephemeral key exchange mode"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_ephemeral_ffdh () {
+    # Same as component_test_tls13_only_ephemeral, but with ECDH disabled
+    # so the ephemeral exchange can only use finite-field DH groups.
+    msg "build: TLS 1.3 only from default, only ephemeral ffdh key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_EARLY_DATA
+    scripts/config.py unset MBEDTLS_ECDH_C
+
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, only ephemeral ffdh key exchange mode"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, only ephemeral ffdh key exchange mode"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_psk_ephemeral () {
+    # TLS 1.3-only build restricted to the PSK-ephemeral key exchange mode;
+    # certificate/signature support is not needed and is disabled.
+    msg "build: TLS 1.3 only from default, only PSK ephemeral key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_X509_CRT_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    scripts/config.py unset MBEDTLS_SSL_SERVER_NAME_INDICATION
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_PKCS1_V21
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, only PSK ephemeral key exchange mode"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, only PSK ephemeral key exchange mode"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_psk_ephemeral_ffdh () {
+    # Same as component_test_tls13_only_psk_ephemeral, but with ECDH
+    # disabled so key establishment can only use finite-field DH groups.
+    msg "build: TLS 1.3 only from default, only PSK ephemeral ffdh key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_ENABLED
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_X509_CRT_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    scripts/config.py unset MBEDTLS_SSL_SERVER_NAME_INDICATION
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_PKCS1_V21
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    scripts/config.py unset MBEDTLS_ECDH_C
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, only PSK ephemeral ffdh key exchange mode"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, only PSK ephemeral ffdh key exchange mode"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_psk_all () {
+    # TLS 1.3-only build with both PSK modes (pure PSK and PSK-ephemeral)
+    # but without the certificate-based ephemeral mode.
+    msg "build: TLS 1.3 only from default, without ephemeral key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED
+    scripts/config.py unset MBEDTLS_X509_CRT_PARSE_C
+    scripts/config.py unset MBEDTLS_X509_RSASSA_PSS_SUPPORT
+    scripts/config.py unset MBEDTLS_SSL_SERVER_NAME_INDICATION
+    scripts/config.py unset MBEDTLS_ECDSA_C
+    scripts/config.py unset MBEDTLS_PKCS1_V21
+    scripts/config.py unset MBEDTLS_PKCS7_C
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, PSK and PSK ephemeral key exchange modes"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, PSK and PSK ephemeral key exchange modes"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_only_ephemeral_all () {
+    # TLS 1.3-only build with both ephemeral modes (certificate-based and
+    # PSK-ephemeral) but without pure PSK.
+    msg "build: TLS 1.3 only from default, without PSK key exchange mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_ENABLED
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/tls13-only.h\"'"
+
+    msg "test_suite_ssl: TLS 1.3 only, ephemeral and PSK ephemeral key exchange modes"
+    cd tests; ./test_suite_ssl; cd ..
+
+    msg "ssl-opt.sh: TLS 1.3 only, ephemeral and PSK ephemeral key exchange modes"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_no_padding () {
+    # ASan build/test with record padding effectively disabled
+    # (padding granularity 1) and early data enabled.
+    msg "build: default config plus early data minus padding"
+    scripts/config.py set MBEDTLS_SSL_CID_TLS1_3_PADDING_GRANULARITY 1
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+    msg "test: default config plus early data minus padding"
+    make test
+    msg "ssl-opt.sh (TLS 1.3 no padding)"
+    tests/ssl-opt.sh
+}
+
+component_test_tls13_no_compatibility_mode () {
+    # ASan build/test with TLS 1.3 middlebox compatibility mode disabled
+    # and early data enabled.
+    msg "build: default config plus early data minus middlebox compatibility mode"
+    scripts/config.py unset MBEDTLS_SSL_TLS1_3_COMPATIBILITY_MODE
+    scripts/config.py set MBEDTLS_SSL_EARLY_DATA
+    CC=$ASAN_CC cmake -D CMAKE_BUILD_TYPE:String=Asan .
+    make
+    msg "test: default config plus early data minus middlebox compatibility mode"
+    make test
+    msg "ssl-opt.sh (TLS 1.3 no compatibility mode)"
+    tests/ssl-opt.sh
+}
+
+component_build_mingw () {
+    # Cross-build for 32-bit Windows with the mingw-w64 toolchain:
+    # static library + programs, tests (built only, not run), the DLL
+    # build, and finally a library-only build without MBEDTLS_AESNI_C.
+    # Fix: the linker was previously given as "i686-w64-minggw32-ld"
+    # (typo, extra 'g'); the correct triplet tool is i686-w64-mingw32-ld.
+    msg "build: Windows cross build - mingw64, make (Link Library)" # ~ 30s
+    make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra -maes -msse2 -mpclmul' WINDOWS_BUILD=1 lib programs
+
+    # note Make tests only builds the tests, but doesn't run them
+    make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -maes -msse2 -mpclmul' WINDOWS_BUILD=1 tests
+    make WINDOWS_BUILD=1 clean
+
+    msg "build: Windows cross build - mingw64, make (DLL)" # ~ 30s
+    make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra -maes -msse2 -mpclmul' WINDOWS_BUILD=1 SHARED=1 lib programs
+    make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra -maes -msse2 -mpclmul' WINDOWS_BUILD=1 SHARED=1 tests
+    make WINDOWS_BUILD=1 clean
+
+    msg "build: Windows cross build - mingw64, make (Library only, default config without MBEDTLS_AESNI_C)" # ~ 30s
+    ./scripts/config.py unset MBEDTLS_AESNI_C
+    make CC=i686-w64-mingw32-gcc AR=i686-w64-mingw32-ar LD=i686-w64-mingw32-ld CFLAGS='-Werror -Wall -Wextra' WINDOWS_BUILD=1 lib
+    make WINDOWS_BUILD=1 clean
+}
+support_build_mingw() {
+    # Supported only with mingw-w64 gcc >= 6; an empty -dumpversion
+    # (compiler absent) also means unsupported.
+    case $(i686-w64-mingw32-gcc -dumpversion 2>/dev/null) in
+        [0-5]*|"") false;;
+        *) true;;
+    esac
+}
+
+component_test_memsan () {
+    # Build with clang MemorySanitizer and run the main suites, metatests,
+    # demos and ssl-opt.sh; compat.sh only when $MEMORY > 0.
+    msg "build: MSan (clang)" # ~ 1 min 20s
+    scripts/config.py unset MBEDTLS_AESNI_C # memsan doesn't grok asm
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=MemSan .
+    make
+
+    msg "test: main suites (MSan)" # ~ 10s
+    make test
+
+    msg "test: metatests (MSan)"
+    tests/scripts/run-metatests.sh any msan
+
+    msg "program demos (MSan)" # ~20s
+    tests/scripts/run_demos.py
+
+    msg "test: ssl-opt.sh (MSan)" # ~ 1 min
+    tests/ssl-opt.sh
+
+    # Optional part(s)
+
+    if [ "$MEMORY" -gt 0 ]; then
+        msg "test: compat.sh (MSan)" # ~ 6 min 20s
+        tests/compat.sh
+    fi
+}
+
+component_release_test_valgrind () {
+    # Release build (default config) checked under Valgrind; the slower
+    # script-based checks are gated on increasing levels of $MEMORY.
+    msg "build: Release (clang)"
+    # default config, in particular without MBEDTLS_USE_PSA_CRYPTO
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=Release .
+    make
+
+    msg "test: main suites, Valgrind (default config)"
+    make memcheck
+
+    # Optional parts (slow; currently broken on OS X because programs don't
+    # seem to receive signals under valgrind on OS X).
+    # These optional parts don't run on the CI.
+    if [ "$MEMORY" -gt 0 ]; then
+        msg "test: ssl-opt.sh --memcheck (default config)"
+        tests/ssl-opt.sh --memcheck
+    fi
+
+    if [ "$MEMORY" -gt 1 ]; then
+        msg "test: compat.sh --memcheck (default config)"
+        tests/compat.sh --memcheck
+    fi
+
+    if [ "$MEMORY" -gt 0 ]; then
+        msg "test: context-info.sh --memcheck (default config)"
+        tests/context-info.sh --memcheck
+    fi
+}
+
+component_release_test_valgrind_psa () {
+    # Release build of the full config (PSA enabled) checked under Valgrind.
+    msg "build: Release, full (clang)"
+    # full config, in particular with MBEDTLS_USE_PSA_CRYPTO
+    scripts/config.py full
+    CC=clang cmake -D CMAKE_BUILD_TYPE:String=Release .
+    make
+
+    msg "test: main suites, Valgrind (full config)"
+    make memcheck
+}
+
+support_test_cmake_out_of_source () {
+    # Parse /etc/lsb-release (if present) to detect Ubuntu <= 16.04,
+    # where the out-of-source CMake test is unreliable. Fails safe:
+    # on non-LSB systems the test is considered supported.
+    distrib_id=""
+    distrib_ver=""
+    distrib_ver_minor=""
+    distrib_ver_major=""
+
+    # Attempt to parse lsb-release to find out distribution and version. If not
+    # found this should fail safe (test is supported).
+    if [[ -f /etc/lsb-release ]]; then
+
+        while read -r lsb_line; do
+            case "$lsb_line" in
+                "DISTRIB_ID"*) distrib_id=${lsb_line/#DISTRIB_ID=};;
+                "DISTRIB_RELEASE"*) distrib_ver=${lsb_line/#DISTRIB_RELEASE=};;
+            esac
+        done < /etc/lsb-release
+
+        # Split "major.minor" (e.g. 16.04) on the first dot.
+        distrib_ver_major="${distrib_ver%%.*}"
+        distrib_ver="${distrib_ver#*.}"
+        distrib_ver_minor="${distrib_ver%%.*}"
+    fi
+
+    # Running the out of source CMake test on Ubuntu 16.04 using more than one
+    # processor (as the CI does) can create a race condition whereby the build
+    # fails to see a generated file, despite that file actually having been
+    # generated. This problem appears to go away with 18.04 or newer, so make
+    # the out of source tests unsupported on Ubuntu 16.04.
+    [ "$distrib_id" != "Ubuntu" ] || [ "$distrib_ver_major" -gt 16 ]
+}
+
+component_test_cmake_out_of_source () {
+    # Build and test from a separate build directory ($OUT_OF_SOURCE_DIR,
+    # set elsewhere in this script), then clean it up.
+    # Remove existing generated files so that we use the ones cmake
+    # generates
+    make neat
+
+    msg "build: cmake 'out-of-source' build"
+    MBEDTLS_ROOT_DIR="$PWD"
+    mkdir "$OUT_OF_SOURCE_DIR"
+    cd "$OUT_OF_SOURCE_DIR"
+    # Note: Explicitly generate files as these are turned off in releases
+    cmake -D CMAKE_BUILD_TYPE:String=Check -D GEN_FILES=ON "$MBEDTLS_ROOT_DIR"
+    make
+
+    msg "test: cmake 'out-of-source' build"
+    make test
+    # Check that ssl-opt.sh can find the test programs.
+    # Also ensure that there are no error messages such as
+    # "No such file or directory", which would indicate that some required
+    # file is missing (ssl-opt.sh tolerates the absence of some files so
+    # may exit with status 0 but emit errors).
+    ./tests/ssl-opt.sh -f 'Default' >ssl-opt.out 2>ssl-opt.err
+    grep PASS ssl-opt.out
+    cat ssl-opt.err >&2
+    # If ssl-opt.err is non-empty, record an error and keep going.
+    [ ! -s ssl-opt.err ]
+    rm ssl-opt.out ssl-opt.err
+    cd "$MBEDTLS_ROOT_DIR"
+    rm -rf "$OUT_OF_SOURCE_DIR"
+}
+
+component_test_cmake_as_subdirectory () {
+    # Check that mbedtls builds and links when pulled in via CMake
+    # add_subdirectory() (see programs/test/cmake_subproject).
+    # Remove existing generated files so that we use the ones CMake
+    # generates
+    make neat
+
+    msg "build: cmake 'as-subdirectory' build"
+    cd programs/test/cmake_subproject
+    # Note: Explicitly generate files as these are turned off in releases
+    cmake -D GEN_FILES=ON .
+    make
+    ./cmake_subproject
+}
+support_test_cmake_as_subdirectory () {
+    # Same platform restrictions as the out-of-source CMake test.
+    support_test_cmake_out_of_source
+}
+
+component_test_cmake_as_package () {
+    # Check that mbedtls can be consumed as a CMake package
+    # (see programs/test/cmake_package).
+    # Remove existing generated files so that we use the ones CMake
+    # generates
+    make neat
+
+    msg "build: cmake 'as-package' build"
+    cd programs/test/cmake_package
+    cmake .
+    make
+    ./cmake_package
+}
+support_test_cmake_as_package () {
+    # Same platform restrictions as the out-of-source CMake test.
+    support_test_cmake_out_of_source
+}
+
+component_test_cmake_as_package_install () {
+    # Check that an installed mbedtls can be consumed via CMake
+    # find_package() (see programs/test/cmake_package_install).
+    # Remove existing generated files so that we use the ones CMake
+    # generates
+    make neat
+
+    msg "build: cmake 'as-installed-package' build"
+    cd programs/test/cmake_package_install
+    cmake .
+    make
+    ./cmake_package_install
+}
+support_test_cmake_as_package_install () {
+    # Same platform restrictions as the out-of-source CMake test.
+    support_test_cmake_out_of_source
+}
+
+component_build_cmake_custom_config_file () {
+    # Exercise the CMake -DMBEDTLS_CONFIG_FILE and -DMBEDTLS_USER_CONFIG_FILE
+    # options, first in an out-of-source build, then in-tree. The real
+    # config header is replaced by an #error so that any accidental use of
+    # it fails the build.
+    # Make a copy of config file to use for the in-tree test
+    cp "$CONFIG_H" include/mbedtls_config_in_tree_copy.h
+
+    MBEDTLS_ROOT_DIR="$PWD"
+    mkdir "$OUT_OF_SOURCE_DIR"
+    cd "$OUT_OF_SOURCE_DIR"
+
+    # Build once to get the generated files (which need an intact config file)
+    cmake "$MBEDTLS_ROOT_DIR"
+    make
+
+    msg "build: cmake with -DMBEDTLS_CONFIG_FILE"
+    scripts/config.py -w full_config.h full
+    echo '#error "cmake -DMBEDTLS_CONFIG_FILE is not working."' > "$MBEDTLS_ROOT_DIR/$CONFIG_H"
+    cmake -DGEN_FILES=OFF -DMBEDTLS_CONFIG_FILE=full_config.h "$MBEDTLS_ROOT_DIR"
+    make
+
+    msg "build: cmake with -DMBEDTLS_CONFIG_FILE + -DMBEDTLS_USER_CONFIG_FILE"
+    # In the user config, disable one feature (for simplicity, pick a feature
+    # that nothing else depends on).
+    echo '#undef MBEDTLS_NIST_KW_C' >user_config.h
+
+    cmake -DGEN_FILES=OFF -DMBEDTLS_CONFIG_FILE=full_config.h -DMBEDTLS_USER_CONFIG_FILE=user_config.h "$MBEDTLS_ROOT_DIR"
+    make
+    # The user config must win: the feature must be reported as disabled.
+    not programs/test/query_compile_time_config MBEDTLS_NIST_KW_C
+
+    rm -f user_config.h full_config.h
+
+    cd "$MBEDTLS_ROOT_DIR"
+    rm -rf "$OUT_OF_SOURCE_DIR"
+
+    # Now repeat the test for an in-tree build:
+
+    # Restore config for the in-tree test
+    mv include/mbedtls_config_in_tree_copy.h "$CONFIG_H"
+
+    # Build once to get the generated files (which need an intact config)
+    cmake .
+    make
+
+    msg "build: cmake (in-tree) with -DMBEDTLS_CONFIG_FILE"
+    scripts/config.py -w full_config.h full
+    echo '#error "cmake -DMBEDTLS_CONFIG_FILE is not working."' > "$MBEDTLS_ROOT_DIR/$CONFIG_H"
+    cmake -DGEN_FILES=OFF -DMBEDTLS_CONFIG_FILE=full_config.h .
+    make
+
+    msg "build: cmake (in-tree) with -DMBEDTLS_CONFIG_FILE + -DMBEDTLS_USER_CONFIG_FILE"
+    # In the user config, disable one feature (for simplicity, pick a feature
+    # that nothing else depends on).
+    echo '#undef MBEDTLS_NIST_KW_C' >user_config.h
+
+    cmake -DGEN_FILES=OFF -DMBEDTLS_CONFIG_FILE=full_config.h -DMBEDTLS_USER_CONFIG_FILE=user_config.h .
+    make
+    # The user config must win: the feature must be reported as disabled.
+    not programs/test/query_compile_time_config MBEDTLS_NIST_KW_C
+
+    rm -f user_config.h full_config.h
+}
+support_build_cmake_custom_config_file () {
+    # Same platform restrictions as the out-of-source CMake test.
+    support_test_cmake_out_of_source
+}
+
+
+component_build_zeroize_checks () {
+    msg "build: check for obviously wrong calls to mbedtls_platform_zeroize()"
+
+    scripts/config.py full
+
+    # Only compile - we're looking for sizeof-pointer-memaccess warnings
+    # (promoted to errors by -Werror); the user config redefines zeroize
+    # as memset so the compiler can see the size arguments.
+    make CFLAGS="'-DMBEDTLS_USER_CONFIG_FILE=\"../tests/configs/user-config-zeroize-memset.h\"' -DMBEDTLS_TEST_DEFINES_ZEROIZE -Werror -Wsizeof-pointer-memaccess"
+}
+
+
+component_test_zeroize () {
+    # Test that the function mbedtls_platform_zeroize() is not optimized away by
+    # different combinations of compilers and optimization flags by using an
+    # auxiliary GDB script. Unfortunately, GDB does not return error values to the
+    # system in all cases that the script fails, so we must manually search the
+    # output to check whether the pass string is present and no failure strings
+    # were printed.
+
+    # Don't try to disable ASLR. We don't care about ASLR here. We do care
+    # about a spurious message if Gdb tries and fails, so suppress that.
+    gdb_disable_aslr=
+    if [ -z "$(gdb -batch -nw -ex 'set disable-randomization off' 2>&1)" ]; then
+        gdb_disable_aslr='set disable-randomization off'
+    fi
+
+    # Cross product of optimisation levels and compilers.
+    for optimization_flag in -O2 -O3 -Ofast -Os; do
+        for compiler in clang gcc; do
+            msg "test: $compiler $optimization_flag, mbedtls_platform_zeroize()"
+            make programs CC="$compiler" DEBUG=1 CFLAGS="$optimization_flag"
+            gdb -ex "$gdb_disable_aslr" -x tests/scripts/test_zeroize.gdb -nw -batch -nx 2>&1 | tee test_zeroize.log
+            # Pass string must be present, and no (case-insensitive) "error".
+            grep "The buffer was correctly zeroized" test_zeroize.log
+            not grep -i "error" test_zeroize.log
+            rm -f test_zeroize.log
+            make clean
+        done
+    done
+}
+
+component_test_psa_compliance () {
+    # Run the PSA Certified API architecture test suite against
+    # libmbedcrypto, driven by test_psa_compliance.py.
+    # The arch tests build with gcc, so require use of gcc here to link properly
+    msg "build: make, default config (out-of-box), libmbedcrypto.a only"
+    CC=gcc make -C library libmbedcrypto.a
+
+    msg "unit test: test_psa_compliance.py"
+    CC=gcc ./tests/scripts/test_psa_compliance.py
+}
+
+support_test_psa_compliance () {
+    # psa-compliance-tests only supports CMake >= 3.10.0
+    # Extract "major.minor" from `cmake --version` output
+    # ("cmake version X.Y.Z...").
+    ver="$(cmake --version)"
+    ver="${ver#cmake version }"
+    ver_major="${ver%%.*}"
+
+    ver="${ver#*.}"
+    ver_minor="${ver%%.*}"
+
+    # NOTE(review): this rejects CMake 4.x and later; presumably fine for
+    # current CI images, but verify if CMake major ever bumps.
+    [ "$ver_major" -eq 3 ] && [ "$ver_minor" -ge 10 ]
+}
+
+component_check_code_style () {
+    # Run the uncrustify-based C style checker.
+    msg "Check C code style"
+    ./scripts/code_style.py
+}
+
+support_check_code_style() {
+    # The style check is only meaningful with the exact pinned
+    # uncrustify version (0.75.1); other versions give different output.
+    case $(uncrustify --version) in
+        *0.75.1*) true;;
+        *) false;;
+    esac
+}
+
+component_check_python_files () {
+    # Lint all Python scripts in the tree.
+    msg "Lint: Python scripts"
+    tests/scripts/check-python-files.sh
+}
+
+component_check_test_helpers () {
+    # Unit-test the Python helpers used by the test framework itself.
+    msg "unit test: generate_test_code.py"
+    # unittest writes out mundane stuff like number or tests run on stderr.
+    # Our convention is to reserve stderr for actual errors, and write
+    # harmless info on stdout so it can be suppress with --quiet.
+    ./tests/scripts/test_generate_test_code.py 2>&1
+
+    msg "unit test: translate_ciphers.py"
+    python3 -m unittest tests/scripts/translate_ciphers.py 2>&1
+}
+
+
+################################################################
+#### Termination
+################################################################
+
+post_report () {
+    # Final cleanup and summary; final_report may exit non-zero on failures.
+    msg "Done, cleaning up"
+    final_cleanup
+
+    final_report
+}
+
+
+
+################################################################
+#### Run all the things
+################################################################
+
+# Function invoked by --error-test to test error reporting.
+pseudo_component_error_test () {
+    msg "Testing error reporting $error_test_i"
+    if [ $KEEP_GOING -ne 0 ]; then
+        echo "Expect three failing commands."
+    fi
+    # If the component doesn't run in a subshell, changing error_test_i to an
+    # invalid integer will cause an error in the loop that runs this function.
+    error_test_i=this_should_not_be_used_since_the_component_runs_in_a_subshell
+    # Expected error: 'grep non_existent /dev/null -> 1'
+    grep non_existent /dev/null
+    # Expected error: '! grep -q . tests/scripts/all.sh -> 1'
+    not grep -q . "$0"
+    # Expected error: 'make unknown_target -> 2'
+    make unknown_target
+    # Unreachable: one of the failures above must have stopped the component.
+    false "this should not be executed"
+}
+
+# Run one component and clean up afterwards.
+run_component () {
+    current_component="$1"
+    export MBEDTLS_TEST_CONFIGURATION="$current_component"
+
+    # Unconditionally create a seedfile that's sufficiently long.
+    # Do this before each component, because a previous component may
+    # have messed it up or shortened it.
+    local dd_cmd
+    dd_cmd=(dd if=/dev/urandom of=./tests/seedfile bs=64 count=1)
+    case $OSTYPE in
+        # status=none silences dd's transfer summary where supported.
+        linux*|freebsd*|openbsd*) dd_cmd+=(status=none)
+    esac
+    "${dd_cmd[@]}"
+
+    # Run the component in a subshell, with error trapping and output
+    # redirection set up based on the relevant options.
+    if [ $KEEP_GOING -eq 1 ]; then
+        # We want to keep running if the subshell fails, so 'set -e' must
+        # be off when the subshell runs.
+        set +e
+    fi
+    (
+        if [ $QUIET -eq 1 ]; then
+            # msg() will be silenced, so just print the component name here.
+            echo "${current_component#component_}"
+            exec >/dev/null
+        fi
+        if [ $KEEP_GOING -eq 1 ]; then
+            # Keep "set -e" off, and run an ERR trap instead to record failures.
+            set -E
+            trap err_trap ERR
+        fi
+        # The next line is what runs the component
+        "$@"
+        if [ $KEEP_GOING -eq 1 ]; then
+            trap - ERR
+            exit $last_failure_status
+        fi
+    )
+    component_status=$?
+    if [ $KEEP_GOING -eq 1 ]; then
+        set -e
+        if [ $component_status -ne 0 ]; then
+            failure_count=$((failure_count + 1))
+        fi
+    fi
+
+    # Restore the build tree to a clean state.
+    cleanup
+    unset current_component
+}
+
+# Preliminary setup
+pre_check_environment
+pre_initialize_variables
+pre_parse_command_line "$@"
+
+setup_quiet_wrappers
+pre_check_git
+pre_restore_files
+pre_back_up
+
+build_status=0
+if [ $KEEP_GOING -eq 1 ]; then
+    pre_setup_keep_going
+fi
+pre_prepare_outcome_file
+pre_print_configuration
+pre_check_tools
+cleanup
+if in_mbedtls_repo; then
+    pre_generate_files
+fi
+
+# Run the requested tests.
+# First the error-reporting self-test ($error_test times, set by
+# --error-test), then every selected component.
+for ((error_test_i=1; error_test_i <= error_test; error_test_i++)); do
+    run_component pseudo_component_error_test
+done
+unset error_test_i
+for component in $RUN_COMPONENTS; do
+    run_component "component_$component"
+done
+
+# We're done.
+post_report
diff --git a/tests/scripts/analyze_outcomes.py b/tests/scripts/analyze_outcomes.py
new file mode 100755
index 0000000..5b4deb6
--- /dev/null
+++ b/tests/scripts/analyze_outcomes.py
@@ -0,0 +1,720 @@
+#!/usr/bin/env python3
+
+"""Analyze the test outcomes from a full CI run.
+
+This script can also run on outcomes from a partial run, but the results are
+less likely to be useful.
+"""
+
+import argparse
+import sys
+import traceback
+import re
+import subprocess
+import os
+import typing
+
+import check_test_cases
+
+
+# `ComponentOutcomes` is a named tuple which is defined as:
+# ComponentOutcomes(
+#     successes = {
+#         "<suite_case>",
+#         ...
+#     },
+#     failures = {
+#         "<suite_case>",
+#         ...
+#     }
+# )
+# suite_case = "<suite>;<case>"
+# (The same test case may appear in both sets if it passed on one
+# platform and failed on another.)
+ComponentOutcomes = typing.NamedTuple('ComponentOutcomes',
+                                      [('successes', typing.Set[str]),
+                                       ('failures', typing.Set[str])])
+
+# `Outcomes` is a representation of the outcomes file,
+# which defined as:
+# Outcomes = {
+#     "<component>": ComponentOutcomes,
+#     ...
+# }
+Outcomes = typing.Dict[str, ComponentOutcomes]
+
+
+class Results:
+    """Process analysis results.
+
+    Accumulates error/warning counts while writing formatted messages
+    to stderr; the caller uses the counts to decide the exit status.
+    """
+
+    def __init__(self):
+        # Number of error() / warning() calls so far.
+        self.error_count = 0
+        self.warning_count = 0
+
+    def new_section(self, fmt, *args, **kwargs):
+        """Print a visually separated section header."""
+        self._print_line('\n*** ' + fmt + ' ***\n', *args, **kwargs)
+
+    def info(self, fmt, *args, **kwargs):
+        """Print an informational line (does not affect the counters)."""
+        self._print_line('Info: ' + fmt, *args, **kwargs)
+
+    def error(self, fmt, *args, **kwargs):
+        """Print an error line and increment the error counter."""
+        self.error_count += 1
+        self._print_line('Error: ' + fmt, *args, **kwargs)
+
+    def warning(self, fmt, *args, **kwargs):
+        """Print a warning line and increment the warning counter."""
+        self.warning_count += 1
+        self._print_line('Warning: ' + fmt, *args, **kwargs)
+
+    @staticmethod
+    def _print_line(fmt, *args, **kwargs):
+        # fmt is a str.format() template; all output goes to stderr.
+        sys.stderr.write((fmt + '\n').format(*args, **kwargs))
+
+def execute_reference_driver_tests(results: Results, ref_component: str, driver_component: str, \
+                                   outcome_file: str) -> None:
+    """Run the tests specified in ref_component and driver_component. Results
+    are stored in the output_file and they will be used for the following
+    coverage analysis"""
+    results.new_section("Test {} and {}", ref_component, driver_component)
+
+    shell_command = "tests/scripts/all.sh --outcome-file " + outcome_file + \
+                    " " + ref_component + " " + driver_component
+    results.info("Running: {}", shell_command)
+    # check=False: a non-zero exit status is reported via results.error()
+    # below rather than raising CalledProcessError.
+    ret_val = subprocess.run(shell_command.split(), check=False).returncode
+
+    if ret_val != 0:
+        results.error("failed to run reference/driver components")
+
+def analyze_coverage(results: Results, outcomes: Outcomes,
+                     allow_list: typing.List[str], full_coverage: bool) -> None:
+    """Check that all available test cases are executed at least once.
+
+    A test case counts as executed if any component ran it, whether it
+    passed or failed. Cases on allow_list may be un-executed without an
+    error; conversely an executed allow-listed case is flagged so the
+    stale entry can be removed. full_coverage upgrades warnings to errors.
+    """
+    available = check_test_cases.collect_available_test_cases()
+    for suite_case in available:
+        hit = any(suite_case in comp_outcomes.successes or
+                  suite_case in comp_outcomes.failures
+                  for comp_outcomes in outcomes.values())
+
+        if not hit and suite_case not in allow_list:
+            if full_coverage:
+                results.error('Test case not executed: {}', suite_case)
+            else:
+                results.warning('Test case not executed: {}', suite_case)
+        elif hit and suite_case in allow_list:
+            # Test Case should be removed from the allow list.
+            if full_coverage:
+                results.error('Allow listed test case was executed: {}', suite_case)
+            else:
+                results.warning('Allow listed test case was executed: {}', suite_case)
+
+def name_matches_pattern(name: str, str_or_re) -> bool:
+    """Check if name matches a pattern, that may be a string or regex.
+    - If the pattern is a string, name must be equal to match.
+    - If the pattern is a regex, name must fully match.
+    """
+    # The CI's python is too old for re.Pattern
+    #if isinstance(str_or_re, re.Pattern):
+    if not isinstance(str_or_re, str):
+        # Anything that isn't a str is assumed to be a compiled regex.
+        return str_or_re.fullmatch(name) is not None
+    else:
+        return str_or_re == name
+
+def analyze_driver_vs_reference(results: Results, outcomes: Outcomes,
+ component_ref: str, component_driver: str,
+ ignored_suites: typing.List[str], ignored_tests=None) -> None:
+ """Check that all tests passing in the reference component are also
+ passing in the corresponding driver component.
+ Skip:
+ - full test suites provided in ignored_suites list
+ - only some specific test inside a test suite, for which the corresponding
+ output string is provided
+ """
+ ref_outcomes = outcomes.get("component_" + component_ref)
+ driver_outcomes = outcomes.get("component_" + component_driver)
+
+ if ref_outcomes is None or driver_outcomes is None:
+ results.error("required components are missing: bad outcome file?")
+ return
+
+ if not ref_outcomes.successes:
+ results.error("no passing test in reference component: bad outcome file?")
+ return
+
+ for suite_case in ref_outcomes.successes:
+ # suite_case is like "test_suite_foo.bar;Description of test case"
+ (full_test_suite, test_string) = suite_case.split(';')
+ test_suite = full_test_suite.split('.')[0] # retrieve main part of test suite name
+
+ # Immediately skip fully-ignored test suites
+ if test_suite in ignored_suites or full_test_suite in ignored_suites:
+ continue
+
+ # For ignored test cases inside test suites, just remember and:
+ # don't issue an error if they're skipped with drivers,
+ # but issue an error if they're not (means we have a bad entry).
+ ignored = False
+ if full_test_suite in ignored_tests:
+ for str_or_re in ignored_tests[full_test_suite]:
+ if name_matches_pattern(test_string, str_or_re):
+ ignored = True
+
+ if not ignored and not suite_case in driver_outcomes.successes:
+ results.error("PASS -> SKIP/FAIL: {}", suite_case)
+ if ignored and suite_case in driver_outcomes.successes:
+ results.error("uselessly ignored: {}", suite_case)
+
+def analyze_outcomes(results: Results, outcomes: Outcomes, args) -> None:
+    """Run all analyses on the given outcome collection.
+
+    args is the task's 'args' dict; only 'allow_list' and 'full_coverage'
+    are used here.
+    """
+    analyze_coverage(results, outcomes, args['allow_list'],
+                     args['full_coverage'])
+
+def read_outcome_file(outcome_file: str) -> Outcomes:
+    """Parse an outcome file and return an outcome collection.
+
+    Each line has 6 semicolon-separated fields:
+    platform;component;suite;case;result;cause. Results other than
+    PASS/FAIL (e.g. SKIP) are recorded in neither set.
+    """
+    outcomes = {}
+    with open(outcome_file, 'r', encoding='utf-8') as input_file:
+        for line in input_file:
+            (_platform, component, suite, case, result, _cause) = line.split(';')
+            # Note that `component` is not unique. If a test case passes on Linux
+            # and fails on FreeBSD, it'll end up in both the successes set and
+            # the failures set.
+            suite_case = ';'.join([suite, case])
+            if component not in outcomes:
+                outcomes[component] = ComponentOutcomes(set(), set())
+            if result == 'PASS':
+                outcomes[component].successes.add(suite_case)
+            elif result == 'FAIL':
+                outcomes[component].failures.add(suite_case)
+
+    return outcomes
+
+def do_analyze_coverage(results: Results, outcomes: Outcomes, args) -> None:
+    """Perform coverage analysis."""
+    results.new_section("Analyze coverage")
+    analyze_outcomes(results, outcomes, args)
+
+def do_analyze_driver_vs_reference(results: Results, outcomes: Outcomes, args) -> None:
+    """Perform driver vs reference analyze.
+
+    args carries 'component_ref', 'component_driver', 'ignored_suites'
+    (bare suite names, prefixed with 'test_suite_' here) and 'ignored_tests'.
+    """
+    results.new_section("Analyze driver {} vs reference {}",
+                        args['component_driver'], args['component_ref'])
+
+    ignored_suites = ['test_suite_' + x for x in args['ignored_suites']]
+
+    analyze_driver_vs_reference(results, outcomes,
+                                args['component_ref'], args['component_driver'],
+                                ignored_suites, args['ignored_tests'])
+
+# List of tasks with a function that can handle this task and additional arguments if required
+KNOWN_TASKS = {
+ 'analyze_coverage': {
+ 'test_function': do_analyze_coverage,
+ 'args': {
+ 'allow_list': [
+ # Algorithm not supported yet
+ 'test_suite_psa_crypto_metadata;Asymmetric signature: pure EdDSA',
+ # Algorithm not supported yet
+ 'test_suite_psa_crypto_metadata;Cipher: XTS',
+ ],
+ 'full_coverage': False,
+ }
+ },
+ # There are 2 options to use analyze_driver_vs_reference_xxx locally:
+ # 1. Run tests and then analysis:
+ # - tests/scripts/all.sh --outcome-file "$PWD/out.csv" <component_ref> <component_driver>
+ # - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
+ # 2. Let this script run both automatically:
+ # - tests/scripts/analyze_outcomes.py out.csv analyze_driver_vs_reference_xxx
+ 'analyze_driver_vs_reference_hash': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_hash_use_psa',
+ 'component_driver': 'test_psa_crypto_config_accel_hash_use_psa',
+ 'ignored_suites': [
+ 'shax', 'mdx', # the software implementations that are being excluded
+ 'md.psa', # purposefully depends on whether drivers are present
+ 'psa_crypto_low_hash.generated', # testing the builtins
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_hmac': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_hmac',
+ 'component_driver': 'test_psa_crypto_config_accel_hmac',
+ 'ignored_suites': [
+ # These suites require legacy hash support, which is disabled
+ # in the accelerated component.
+ 'shax', 'mdx',
+ # This suite tests builtins directly, but these are missing
+ # in the accelerated case.
+ 'psa_crypto_low_hash.generated',
+ ],
+ 'ignored_tests': {
+ 'test_suite_md': [
+                # Builtin HMAC is not supported in the accelerated component.
+ re.compile('.*HMAC.*'),
+ # Following tests make use of functions which are not available
+ # when MD_C is disabled, as it happens in the accelerated
+ # test component.
+ re.compile('generic .* Hash file .*'),
+ 'MD list',
+ ],
+ 'test_suite_md.psa': [
+ # "legacy only" tests require hash algorithms to be NOT
+                # accelerated, but this is of course false for the accelerated
+ # test component.
+ re.compile('PSA dispatch .* legacy only'),
+ ],
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_cipher_aead_cmac': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_cipher_aead_cmac',
+ 'component_driver': 'test_psa_crypto_config_accel_cipher_aead_cmac',
+ # Modules replaced by drivers.
+ 'ignored_suites': [
+ # low-level (block/stream) cipher modules
+ 'aes', 'aria', 'camellia', 'des', 'chacha20',
+ # AEAD modes and CMAC
+ 'ccm', 'chachapoly', 'cmac', 'gcm',
+ # The Cipher abstraction layer
+ 'cipher',
+ ],
+ 'ignored_tests': {
+ # PEM decryption is not supported so far.
+ # The rest of PEM (write, unencrypted read) works though.
+ 'test_suite_pem': [
+ re.compile(r'PEM read .*(AES|DES|\bencrypt).*'),
+ ],
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # Following tests depend on AES_C/DES_C but are not about
+ # them really, just need to know some error code is there.
+ 'test_suite_error': [
+ 'Low and high error',
+ 'Single low error'
+ ],
+ # Similar to test_suite_error above.
+ 'test_suite_version': [
+ 'Check for MBEDTLS_AES_C when already present',
+ ],
+ # The en/decryption part of PKCS#12 is not supported so far.
+ # The rest of PKCS#12 (key derivation) works though.
+ 'test_suite_pkcs12': [
+ re.compile(r'PBE Encrypt, .*'),
+ re.compile(r'PBE Decrypt, .*'),
+ ],
+ # The en/decryption part of PKCS#5 is not supported so far.
+ # The rest of PKCS#5 (PBKDF2) works though.
+ 'test_suite_pkcs5': [
+ re.compile(r'PBES2 Encrypt, .*'),
+ re.compile(r'PBES2 Decrypt .*'),
+ ],
+ # Encrypted keys are not supported so far.
+ # pylint: disable=line-too-long
+ 'test_suite_pkparse': [
+ 'Key ASN1 (Encrypted key PKCS12, trailing garbage data)',
+ 'Key ASN1 (Encrypted key PKCS5, trailing garbage data)',
+ re.compile(r'Parse (RSA|EC) Key .*\(.* ([Ee]ncrypted|password).*\)'),
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_ecp_light_only': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_ecc_ecp_light_only',
+ 'component_driver': 'test_psa_crypto_config_accel_ecc_ecp_light_only',
+ 'ignored_suites': [
+ # Modules replaced by drivers
+ 'ecdsa', 'ecdh', 'ecjpake',
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # This test wants a legacy function that takes f_rng, p_rng
+ # arguments, and uses legacy ECDSA for that. The test is
+ # really about the wrapper around the PSA RNG, not ECDSA.
+ 'test_suite_random': [
+ 'PSA classic wrapper: ECDSA signature (SECP256R1)',
+ ],
+ # In the accelerated test ECP_C is not set (only ECP_LIGHT is)
+ # so we must ignore disparities in the tests for which ECP_C
+ # is required.
+ 'test_suite_ecp': [
+ re.compile(r'ECP check public-private .*'),
+ re.compile(r'ECP calculate public: .*'),
+ re.compile(r'ECP gen keypair .*'),
+ re.compile(r'ECP point muladd .*'),
+ re.compile(r'ECP point multiplication .*'),
+ re.compile(r'ECP test vectors .*'),
+ ],
+ 'test_suite_ssl': [
+ # This deprecated function is only present when ECP_C is On.
+ 'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_no_ecp_at_all': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_ecc_no_ecp_at_all',
+ 'component_driver': 'test_psa_crypto_config_accel_ecc_no_ecp_at_all',
+ 'ignored_suites': [
+ # Modules replaced by drivers
+ 'ecp', 'ecdsa', 'ecdh', 'ecjpake',
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # See ecp_light_only
+ 'test_suite_random': [
+ 'PSA classic wrapper: ECDSA signature (SECP256R1)',
+ ],
+ 'test_suite_pkparse': [
+ # When PK_PARSE_C and ECP_C are defined then PK_PARSE_EC_COMPRESSED
+ # is automatically enabled in build_info.h (backward compatibility)
+ # even if it is disabled in config_psa_crypto_no_ecp_at_all(). As a
+ # consequence compressed points are supported in the reference
+ # component but not in the accelerated one, so they should be skipped
+ # while checking driver's coverage.
+ re.compile(r'Parse EC Key .*compressed\)'),
+ re.compile(r'Parse Public EC Key .*compressed\)'),
+ ],
+ # See ecp_light_only
+ 'test_suite_ssl': [
+ 'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_ecc_no_bignum': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_ecc_no_bignum',
+ 'component_driver': 'test_psa_crypto_config_accel_ecc_no_bignum',
+ 'ignored_suites': [
+ # Modules replaced by drivers
+ 'ecp', 'ecdsa', 'ecdh', 'ecjpake',
+ 'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
+ 'bignum.generated', 'bignum.misc',
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # See ecp_light_only
+ 'test_suite_random': [
+ 'PSA classic wrapper: ECDSA signature (SECP256R1)',
+ ],
+ # See no_ecp_at_all
+ 'test_suite_pkparse': [
+ re.compile(r'Parse EC Key .*compressed\)'),
+ re.compile(r'Parse Public EC Key .*compressed\)'),
+ ],
+ 'test_suite_asn1parse': [
+ 'INTEGER too large for mpi',
+ ],
+ 'test_suite_asn1write': [
+ re.compile(r'ASN.1 Write mpi.*'),
+ ],
+ 'test_suite_debug': [
+ re.compile(r'Debug print mbedtls_mpi.*'),
+ ],
+ # See ecp_light_only
+ 'test_suite_ssl': [
+ 'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_ecc_ffdh_no_bignum': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_ecc_ffdh_no_bignum',
+ 'component_driver': 'test_psa_crypto_config_accel_ecc_ffdh_no_bignum',
+ 'ignored_suites': [
+ # Modules replaced by drivers
+ 'ecp', 'ecdsa', 'ecdh', 'ecjpake', 'dhm',
+ 'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
+ 'bignum.generated', 'bignum.misc',
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # See ecp_light_only
+ 'test_suite_random': [
+ 'PSA classic wrapper: ECDSA signature (SECP256R1)',
+ ],
+ # See no_ecp_at_all
+ 'test_suite_pkparse': [
+ re.compile(r'Parse EC Key .*compressed\)'),
+ re.compile(r'Parse Public EC Key .*compressed\)'),
+ ],
+ 'test_suite_asn1parse': [
+ 'INTEGER too large for mpi',
+ ],
+ 'test_suite_asn1write': [
+ re.compile(r'ASN.1 Write mpi.*'),
+ ],
+ 'test_suite_debug': [
+ re.compile(r'Debug print mbedtls_mpi.*'),
+ ],
+ # See ecp_light_only
+ 'test_suite_ssl': [
+ 'Test configuration of groups for DHE through mbedtls_ssl_conf_curves()',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_ffdh_alg': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_ffdh',
+ 'component_driver': 'test_psa_crypto_config_accel_ffdh',
+ 'ignored_suites': ['dhm'],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_tfm_config': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_tfm_config',
+ 'component_driver': 'test_tfm_config_p256m_driver_accel_ec',
+ 'ignored_suites': [
+ # Modules replaced by drivers
+ 'asn1parse', 'asn1write',
+ 'ecp', 'ecdsa', 'ecdh', 'ecjpake',
+ 'bignum_core', 'bignum_random', 'bignum_mod', 'bignum_mod_raw',
+ 'bignum.generated', 'bignum.misc',
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # See ecp_light_only
+ 'test_suite_random': [
+ 'PSA classic wrapper: ECDSA signature (SECP256R1)',
+ ],
+ }
+ }
+ },
+ 'analyze_driver_vs_reference_rsa': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_psa_crypto_config_reference_rsa_crypto',
+ 'component_driver': 'test_psa_crypto_config_accel_rsa_crypto',
+ 'ignored_suites': [
+ # Modules replaced by drivers.
+ 'rsa', 'pkcs1_v15', 'pkcs1_v21',
+ # We temporarily don't care about PK stuff.
+ 'pk', 'pkwrite', 'pkparse'
+ ],
+ 'ignored_tests': {
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ # Following tests depend on RSA_C but are not about
+ # them really, just need to know some error code is there.
+ 'test_suite_error': [
+ 'Low and high error',
+ 'Single high error'
+ ],
+ # Constant time operations only used for PKCS1_V15
+ 'test_suite_constant_time': [
+ re.compile(r'mbedtls_ct_zeroize_if .*'),
+ re.compile(r'mbedtls_ct_memmove_left .*')
+ ],
+ 'test_suite_psa_crypto': [
+ # We don't support generate_key_ext entry points
+ # in drivers yet.
+ re.compile(r'PSA generate key ext: RSA, e=.*'),
+ ],
+ }
+ }
+ },
+ 'analyze_block_cipher_dispatch': {
+ 'test_function': do_analyze_driver_vs_reference,
+ 'args': {
+ 'component_ref': 'test_full_block_cipher_legacy_dispatch',
+ 'component_driver': 'test_full_block_cipher_psa_dispatch',
+ 'ignored_suites': [
+ # Skipped in the accelerated component
+ 'aes', 'aria', 'camellia',
+ # These require AES_C, ARIA_C or CAMELLIA_C to be enabled in
+ # order for the cipher module (actually cipher_wrapper) to work
+ # properly. However these symbols are disabled in the accelerated
+ # component so we ignore them.
+ 'cipher.ccm', 'cipher.gcm', 'cipher.aes', 'cipher.aria',
+ 'cipher.camellia',
+ ],
+ 'ignored_tests': {
+ 'test_suite_cmac': [
+ # Following tests require AES_C/ARIA_C/CAMELLIA_C to be enabled,
+ # but these are not available in the accelerated component.
+ 'CMAC null arguments',
+ re.compile('CMAC.* (AES|ARIA|Camellia).*'),
+ ],
+ 'test_suite_cipher.padding': [
+ # Following tests require AES_C/CAMELLIA_C to be enabled,
+ # but these are not available in the accelerated component.
+ re.compile('Set( non-existent)? padding with (AES|CAMELLIA).*'),
+ ],
+ 'test_suite_pkcs5': [
+ # The AES part of PKCS#5 PBES2 is not yet supported.
+ # The rest of PKCS#5 (PBKDF2) works, though.
+ re.compile(r'PBES2 .* AES-.*')
+ ],
+ 'test_suite_pkparse': [
+ # PEM (called by pkparse) requires AES_C in order to decrypt
+ # the key, but this is not available in the accelerated
+ # component.
+ re.compile('Parse RSA Key.*(password|AES-).*'),
+ ],
+ 'test_suite_pem': [
+                # Following tests require AES_C, but this is disabled in the
+ # accelerated component.
+ re.compile('PEM read .*AES.*'),
+ 'PEM read (unknown encryption algorithm)',
+ ],
+ 'test_suite_error': [
+ # Following tests depend on AES_C but are not about them
+ # really, just need to know some error code is there.
+ 'Single low error',
+ 'Low and high error',
+ ],
+ 'test_suite_version': [
+ # Similar to test_suite_error above.
+ 'Check for MBEDTLS_AES_C when already present',
+ ],
+ 'test_suite_platform': [
+ # Incompatible with sanitizers (e.g. ASan). If the driver
+ # component uses a sanitizer but the reference component
+ # doesn't, we have a PASS vs SKIP mismatch.
+ 'Check mbedtls_calloc overallocation',
+ ],
+ }
+ }
+ }
+}
+
+def main():
+ main_results = Results()
+
+ try:
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
+ help='Outcome file to analyze')
+ parser.add_argument('specified_tasks', default='all', nargs='?',
+ help='Analysis to be done. By default, run all tasks. '
+ 'With one or more TASK, run only those. '
+ 'TASK can be the name of a single task or '
+ 'comma/space-separated list of tasks. ')
+ parser.add_argument('--list', action='store_true',
+ help='List all available tasks and exit.')
+ parser.add_argument('--require-full-coverage', action='store_true',
+ dest='full_coverage', help="Require all available "
+ "test cases to be executed and issue an error "
+ "otherwise. This flag is ignored if 'task' is "
+ "neither 'all' nor 'analyze_coverage'")
+ options = parser.parse_args()
+
+ if options.list:
+ for task in KNOWN_TASKS:
+ print(task)
+ sys.exit(0)
+
+ if options.specified_tasks == 'all':
+ tasks_list = KNOWN_TASKS.keys()
+ else:
+ tasks_list = re.split(r'[, ]+', options.specified_tasks)
+ for task in tasks_list:
+ if task not in KNOWN_TASKS:
+ sys.stderr.write('invalid task: {}\n'.format(task))
+ sys.exit(2)
+
+ KNOWN_TASKS['analyze_coverage']['args']['full_coverage'] = options.full_coverage
+
+ # If the outcome file exists, parse it once and share the result
+ # among tasks to improve performance.
+ # Otherwise, it will be generated by execute_reference_driver_tests.
+ if not os.path.exists(options.outcomes):
+ if len(tasks_list) > 1:
+ sys.stderr.write("mutiple tasks found, please provide a valid outcomes file.\n")
+ sys.exit(2)
+
+ task_name = tasks_list[0]
+ task = KNOWN_TASKS[task_name]
+ if task['test_function'] != do_analyze_driver_vs_reference: # pylint: disable=comparison-with-callable
+ sys.stderr.write("please provide valid outcomes file for {}.\n".format(task_name))
+ sys.exit(2)
+
+ execute_reference_driver_tests(main_results,
+ task['args']['component_ref'],
+ task['args']['component_driver'],
+ options.outcomes)
+
+ outcomes = read_outcome_file(options.outcomes)
+
+ for task in tasks_list:
+ test_function = KNOWN_TASKS[task]['test_function']
+ test_args = KNOWN_TASKS[task]['args']
+ test_function(main_results, outcomes, test_args)
+
+ main_results.info("Overall results: {} warnings and {} errors",
+ main_results.warning_count, main_results.error_count)
+
+ sys.exit(0 if (main_results.error_count == 0) else 1)
+
+ except Exception: # pylint: disable=broad-except
+ # Print the backtrace and exit explicitly with our chosen status.
+ traceback.print_exc()
+ sys.exit(120)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/audit-validity-dates.py b/tests/scripts/audit-validity-dates.py
new file mode 100755
index 0000000..96b705a
--- /dev/null
+++ b/tests/scripts/audit-validity-dates.py
@@ -0,0 +1,469 @@
+#!/usr/bin/env python3
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""Audit validity date of X509 crt/crl/csr.
+
+This script is used to audit the validity date of crt/crl/csr used for testing.
+It prints the information about X.509 objects excluding the objects that
+are valid throughout the desired validity period. The data are collected
+from tests/data_files/ and tests/suites/*.data files by default.
+"""
+
+import os
+import re
+import typing
+import argparse
+import datetime
+import glob
+import logging
+import hashlib
+from enum import Enum
+
+# The script requires cryptography >= 35.0.0 which is only available
+# for Python >= 3.6.
+import cryptography
+from cryptography import x509
+
+from generate_test_code import FileWrapper
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import build_tree
+from mbedtls_dev import logging_util
+
+def check_cryptography_version():
+ match = re.match(r'^[0-9]+', cryptography.__version__)
+ if match is None or int(match.group(0)) < 35:
+ raise Exception("audit-validity-dates requires cryptography >= 35.0.0"
+ + "({} is too old)".format(cryptography.__version__))
+
+class DataType(Enum):
+ CRT = 1 # Certificate
+ CRL = 2 # Certificate Revocation List
+ CSR = 3 # Certificate Signing Request
+
+
+class DataFormat(Enum):
+ PEM = 1 # Privacy-Enhanced Mail
+ DER = 2 # Distinguished Encoding Rules
+
+
+class AuditData:
+ """Store data location, type and validity period of X.509 objects."""
+ #pylint: disable=too-few-public-methods
+ def __init__(self, data_type: DataType, x509_obj):
+ self.data_type = data_type
+ # the locations that the x509 object could be found
+ self.locations = [] # type: typing.List[str]
+ self.fill_validity_duration(x509_obj)
+ self._obj = x509_obj
+ encoding = cryptography.hazmat.primitives.serialization.Encoding.DER
+ self._identifier = hashlib.sha1(self._obj.public_bytes(encoding)).hexdigest()
+
+ @property
+ def identifier(self):
+ """
+ Identifier of the underlying X.509 object, which is consistent across
+ different runs.
+ """
+ return self._identifier
+
+ def fill_validity_duration(self, x509_obj):
+ """Read validity period from an X.509 object."""
+ # Certificate expires after "not_valid_after"
+ # Certificate is invalid before "not_valid_before"
+ if self.data_type == DataType.CRT:
+ self.not_valid_after = x509_obj.not_valid_after
+ self.not_valid_before = x509_obj.not_valid_before
+ # CertificateRevocationList expires after "next_update"
+ # CertificateRevocationList is invalid before "last_update"
+ elif self.data_type == DataType.CRL:
+ self.not_valid_after = x509_obj.next_update
+ self.not_valid_before = x509_obj.last_update
+ # CertificateSigningRequest is always valid.
+ elif self.data_type == DataType.CSR:
+ self.not_valid_after = datetime.datetime.max
+ self.not_valid_before = datetime.datetime.min
+ else:
+ raise ValueError("Unsupported file_type: {}".format(self.data_type))
+
+
+class X509Parser:
+ """A parser class to parse crt/crl/csr file or data in PEM/DER format."""
+ PEM_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}(?P<data>.*?)-{5}END (?P=type)-{5}'
+ PEM_TAG_REGEX = br'-{5}BEGIN (?P<type>.*?)-{5}\n'
+ PEM_TAGS = {
+ DataType.CRT: 'CERTIFICATE',
+ DataType.CRL: 'X509 CRL',
+ DataType.CSR: 'CERTIFICATE REQUEST'
+ }
+
+ def __init__(self,
+ backends:
+ typing.Dict[DataType,
+ typing.Dict[DataFormat,
+ typing.Callable[[bytes], object]]]) \
+ -> None:
+ self.backends = backends
+ self.__generate_parsers()
+
+ def __generate_parser(self, data_type: DataType):
+ """Parser generator for a specific DataType"""
+ tag = self.PEM_TAGS[data_type]
+ pem_loader = self.backends[data_type][DataFormat.PEM]
+ der_loader = self.backends[data_type][DataFormat.DER]
+ def wrapper(data: bytes):
+ pem_type = X509Parser.pem_data_type(data)
+ # It is in PEM format with target tag
+ if pem_type == tag:
+ return pem_loader(data)
+ # It is in PEM format without target tag
+ if pem_type:
+ return None
+ # It might be in DER format
+ try:
+ result = der_loader(data)
+ except ValueError:
+ result = None
+ return result
+ wrapper.__name__ = "{}.parser[{}]".format(type(self).__name__, tag)
+ return wrapper
+
+ def __generate_parsers(self):
+ """Generate parsers for all support DataType"""
+ self.parsers = {}
+ for data_type, _ in self.PEM_TAGS.items():
+ self.parsers[data_type] = self.__generate_parser(data_type)
+
+ def __getitem__(self, item):
+ return self.parsers[item]
+
+ @staticmethod
+ def pem_data_type(data: bytes) -> typing.Optional[str]:
+ """Get the tag from the data in PEM format
+
+ :param data: data to be checked in binary mode.
+ :return: PEM tag or "" when no tag detected.
+ """
+ m = re.search(X509Parser.PEM_TAG_REGEX, data)
+ if m is not None:
+ return m.group('type').decode('UTF-8')
+ else:
+ return None
+
+ @staticmethod
+ def check_hex_string(hex_str: str) -> bool:
+ """Check if the hex string is possibly DER data."""
+ hex_len = len(hex_str)
+ # At least 6 hex char for 3 bytes: Type + Length + Content
+ if hex_len < 6:
+ return False
+ # Check if Type (1 byte) is SEQUENCE.
+ if hex_str[0:2] != '30':
+ return False
+ # Check LENGTH (1 byte) value
+ content_len = int(hex_str[2:4], base=16)
+ consumed = 4
+ if content_len in (128, 255):
+ # Indefinite or Reserved
+ return False
+ elif content_len > 127:
+ # Definite, Long
+ length_len = (content_len - 128) * 2
+ content_len = int(hex_str[consumed:consumed+length_len], base=16)
+ consumed += length_len
+ # Check LENGTH
+ if hex_len != content_len * 2 + consumed:
+ return False
+ return True
+
+
+class Auditor:
+ """
+ A base class that uses X509Parser to parse files to a list of AuditData.
+
+ A subclass must implement the following methods:
+    - collect_default_files: Return a list of file names that are used by
+      default for parsing (auditing). The list will be stored in
+ Auditor.default_files.
+ - parse_file: Method that parses a single file to a list of AuditData.
+
+ A subclass may override the following methods:
+    - parse_bytes: By default, it parses `bytes` that contains only one valid
+      X.509 data (DER/PEM format) to an X.509 object.
+    - walk_all: By default, it iterates over all the files in the provided
+ file name list, calls `parse_file` for each file and stores the results
+ by extending the `results` passed to the function.
+ """
+ def __init__(self, logger):
+ self.logger = logger
+ self.default_files = self.collect_default_files()
+ self.parser = X509Parser({
+ DataType.CRT: {
+ DataFormat.PEM: x509.load_pem_x509_certificate,
+ DataFormat.DER: x509.load_der_x509_certificate
+ },
+ DataType.CRL: {
+ DataFormat.PEM: x509.load_pem_x509_crl,
+ DataFormat.DER: x509.load_der_x509_crl
+ },
+ DataType.CSR: {
+ DataFormat.PEM: x509.load_pem_x509_csr,
+ DataFormat.DER: x509.load_der_x509_csr
+ },
+ })
+
+ def collect_default_files(self) -> typing.List[str]:
+ """Collect the default files for parsing."""
+ raise NotImplementedError
+
+ def parse_file(self, filename: str) -> typing.List[AuditData]:
+ """
+ Parse a list of AuditData from file.
+
+ :param filename: name of the file to parse.
+ :return list of AuditData parsed from the file.
+ """
+ raise NotImplementedError
+
+ def parse_bytes(self, data: bytes):
+ """Parse AuditData from bytes."""
+ for data_type in list(DataType):
+ try:
+ result = self.parser[data_type](data)
+ except ValueError as val_error:
+ result = None
+ self.logger.warning(val_error)
+ if result is not None:
+ audit_data = AuditData(data_type, result)
+ return audit_data
+ return None
+
+ def walk_all(self,
+ results: typing.Dict[str, AuditData],
+ file_list: typing.Optional[typing.List[str]] = None) \
+ -> None:
+ """
+ Iterate over all the files in the list and get audit data. The
+ results will be written to `results` passed to this function.
+
+ :param results: The dictionary used to store the parsed
+ AuditData. The keys of this dictionary should
+ be the identifier of the AuditData.
+ """
+ if file_list is None:
+ file_list = self.default_files
+ for filename in file_list:
+ data_list = self.parse_file(filename)
+ for d in data_list:
+ if d.identifier in results:
+ results[d.identifier].locations.extend(d.locations)
+ else:
+ results[d.identifier] = d
+
+ @staticmethod
+ def find_test_dir():
+ """Get the relative path for the Mbed TLS test directory."""
+ return os.path.relpath(build_tree.guess_mbedtls_root() + '/tests')
+
+
+class TestDataAuditor(Auditor):
+ """Class for auditing files in `tests/data_files/`"""
+
+ def collect_default_files(self):
+ """Collect all files in `tests/data_files/`"""
+ test_dir = self.find_test_dir()
+ test_data_glob = os.path.join(test_dir, 'data_files/**')
+ data_files = [f for f in glob.glob(test_data_glob, recursive=True)
+ if os.path.isfile(f)]
+ return data_files
+
+ def parse_file(self, filename: str) -> typing.List[AuditData]:
+ """
+ Parse a list of AuditData from data file.
+
+ :param filename: name of the file to parse.
+ :return list of AuditData parsed from the file.
+ """
+ with open(filename, 'rb') as f:
+ data = f.read()
+
+ results = []
+ # Try to parse all PEM blocks.
+ is_pem = False
+ for idx, m in enumerate(re.finditer(X509Parser.PEM_REGEX, data, flags=re.S), 1):
+ is_pem = True
+ result = self.parse_bytes(data[m.start():m.end()])
+ if result is not None:
+ result.locations.append("{}#{}".format(filename, idx))
+ results.append(result)
+
+ # Might be DER format.
+ if not is_pem:
+ result = self.parse_bytes(data)
+ if result is not None:
+ result.locations.append("{}".format(filename))
+ results.append(result)
+
+ return results
+
+
+def parse_suite_data(data_f):
+ """
+    Parses .data file for test arguments that possibly have a
+ valid X.509 data. If you need a more precise parser, please
+ use generate_test_code.parse_test_data instead.
+
+ :param data_f: file object of the data file.
+ :return: Generator that yields test function argument list.
+ """
+ for line in data_f:
+ line = line.strip()
+ # Skip comments
+ if line.startswith('#'):
+ continue
+
+ # Check parameters line
+ match = re.search(r'\A\w+(.*:)?\"', line)
+ if match:
+ # Read test vectors
+ parts = re.split(r'(?<!\\):', line)
+ parts = [x for x in parts if x]
+ args = parts[1:]
+ yield args
+
+
+class SuiteDataAuditor(Auditor):
+ """Class for auditing files in `tests/suites/*.data`"""
+
+ def collect_default_files(self):
+ """Collect all files in `tests/suites/*.data`"""
+ test_dir = self.find_test_dir()
+ suites_data_folder = os.path.join(test_dir, 'suites')
+ data_files = glob.glob(os.path.join(suites_data_folder, '*.data'))
+ return data_files
+
+ def parse_file(self, filename: str):
+ """
+ Parse a list of AuditData from test suite data file.
+
+ :param filename: name of the file to parse.
+ :return list of AuditData parsed from the file.
+ """
+ audit_data_list = []
+ data_f = FileWrapper(filename)
+ for test_args in parse_suite_data(data_f):
+ for idx, test_arg in enumerate(test_args):
+ match = re.match(r'"(?P<data>[0-9a-fA-F]+)"', test_arg)
+ if not match:
+ continue
+ if not X509Parser.check_hex_string(match.group('data')):
+ continue
+ audit_data = self.parse_bytes(bytes.fromhex(match.group('data')))
+ if audit_data is None:
+ continue
+ audit_data.locations.append("{}:{}:#{}".format(filename,
+ data_f.line_no,
+ idx + 1))
+ audit_data_list.append(audit_data)
+
+ return audit_data_list
+
+
+def list_all(audit_data: AuditData):
+ for loc in audit_data.locations:
+ print("{}\t{:20}\t{:20}\t{:3}\t{}".format(
+ audit_data.identifier,
+ audit_data.not_valid_before.isoformat(timespec='seconds'),
+ audit_data.not_valid_after.isoformat(timespec='seconds'),
+ audit_data.data_type.name,
+ loc))
+
+
+def main():
+ """
+ Perform argument parsing.
+ """
+ parser = argparse.ArgumentParser(description=__doc__)
+
+ parser.add_argument('-a', '--all',
+ action='store_true',
+ help='list the information of all the files')
+ parser.add_argument('-v', '--verbose',
+ action='store_true', dest='verbose',
+ help='show logs')
+ parser.add_argument('--from', dest='start_date',
+ help=('Start of desired validity period (UTC, YYYY-MM-DD). '
+ 'Default: today'),
+ metavar='DATE')
+ parser.add_argument('--to', dest='end_date',
+ help=('End of desired validity period (UTC, YYYY-MM-DD). '
+ 'Default: --from'),
+ metavar='DATE')
+ parser.add_argument('--data-files', action='append', nargs='*',
+ help='data files to audit',
+ metavar='FILE')
+ parser.add_argument('--suite-data-files', action='append', nargs='*',
+ help='suite data files to audit',
+ metavar='FILE')
+
+ args = parser.parse_args()
+
+ # start main routine
+ # setup logger
+ logger = logging.getLogger()
+ logging_util.configure_logger(logger)
+ logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR)
+
+ td_auditor = TestDataAuditor(logger)
+ sd_auditor = SuiteDataAuditor(logger)
+
+ data_files = []
+ suite_data_files = []
+ if args.data_files is None and args.suite_data_files is None:
+ data_files = td_auditor.default_files
+ suite_data_files = sd_auditor.default_files
+ else:
+ if args.data_files is not None:
+ data_files = [x for l in args.data_files for x in l]
+ if args.suite_data_files is not None:
+ suite_data_files = [x for l in args.suite_data_files for x in l]
+
+ # validity period start date
+ if args.start_date:
+ start_date = datetime.datetime.fromisoformat(args.start_date)
+ else:
+ start_date = datetime.datetime.today()
+ # validity period end date
+ if args.end_date:
+ end_date = datetime.datetime.fromisoformat(args.end_date)
+ else:
+ end_date = start_date
+
+ # go through all the files
+ audit_results = {}
+ td_auditor.walk_all(audit_results, data_files)
+ sd_auditor.walk_all(audit_results, suite_data_files)
+
+ logger.info("Total: {} objects found!".format(len(audit_results)))
+
+ # we filter out the files whose validity duration covers the provided
+ # duration.
+ filter_func = lambda d: (start_date < d.not_valid_before) or \
+ (d.not_valid_after < end_date)
+
+ sortby_end = lambda d: d.not_valid_after
+
+ if args.all:
+ filter_func = None
+
+ # filter and output the results
+ for d in sorted(filter(filter_func, audit_results.values()), key=sortby_end):
+ list_all(d)
+
+ logger.debug("Done!")
+
+check_cryptography_version()
+if __name__ == "__main__":
+ main()
diff --git a/tests/scripts/basic-build-test.sh b/tests/scripts/basic-build-test.sh
new file mode 100755
index 0000000..5261754
--- /dev/null
+++ b/tests/scripts/basic-build-test.sh
@@ -0,0 +1,250 @@
+#!/bin/sh
+
+# basic-build-test.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# Executes the basic test suites, captures the results, and generates a simple
+# test report and code coverage report.
+#
+# The tests include:
+#   * Unit tests        - executed using tests/scripts/run-test-suites.pl
+#   * Self-tests        - executed using the test suites above
+#   * System tests      - executed using tests/ssl-opt.sh
+#   * Interoperability tests - executed using tests/compat.sh
+#
+# The tests focus on functionality and do not consider performance.
+#
+# Note the tests self-adapt due to configurations in include/mbedtls/mbedtls_config.h
+# which can lead to some tests being skipped, and can cause the number of
+# available tests to fluctuate.
+#
+# This script has been written to be generic and should work on any shell.
+#
+# Usage: basic-build-test.sh
+#
+
+# Abort on errors (and uninitialised variables)
+set -eu
+
+if [ -d library -a -d include -a -d tests ]; then :; else
+    echo "Must be run from Mbed TLS root" >&2
+    exit 1
+fi
+
+: ${OPENSSL:="openssl"}
+: ${GNUTLS_CLI:="gnutls-cli"}
+: ${GNUTLS_SERV:="gnutls-serv"}
+
+# Used to make ssl-opt.sh deterministic.
+#
+# See also RELEASE_SEED in all.sh. Debugging is easier if both values are kept
+# in sync. If you change the value here because it breaks some tests, you'll
+# definitely want to change it in all.sh as well.
+: ${SEED:=1}
+export SEED
+
+# If MAKEFLAGS is not set, add the -j option to speed up invocations of make.
+if [ -z "${MAKEFLAGS+set}" ]; then
+    export MAKEFLAGS="-j"
+fi
+
+# To avoid setting OpenSSL and GnuTLS for each call to compat.sh and ssl-opt.sh
+# we just export the variables they require
+export OPENSSL="$OPENSSL"
+export GNUTLS_CLI="$GNUTLS_CLI"
+export GNUTLS_SERV="$GNUTLS_SERV"
+
+CONFIG_H='include/mbedtls/mbedtls_config.h'
+CONFIG_BAK="$CONFIG_H.bak"
+
+# Step 0 - print build environment info
+OPENSSL="$OPENSSL" \
+    GNUTLS_CLI="$GNUTLS_CLI" \
+    GNUTLS_SERV="$GNUTLS_SERV" \
+    scripts/output_env.sh
+echo
+
+# Step 1 - Make an instrumented build for code coverage
+export CFLAGS=' --coverage -g3 -O0 '
+export LDFLAGS=' --coverage'
+make clean
+cp "$CONFIG_H" "$CONFIG_BAK"
+scripts/config.py full
+make
+
+
+# Step 2 - Execute the tests
+TEST_OUTPUT=out_${PPID}    # parent PID keeps log names unique per invoking shell
+cd tests
+if [ ! -f "seedfile" ]; then
+    dd if=/dev/urandom of="seedfile" bs=64 count=1
+fi
+echo
+
+# Step 2a - Unit Tests (keep going even if some tests fail)
+echo '################ Unit tests ################'
+perl scripts/run-test-suites.pl -v 2 |tee unit-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ Unit tests ^^^^^^^^^^^^^^^^'
+echo
+
+# Step 2b - System Tests (keep going even if some tests fail)
+echo
+echo '################ ssl-opt.sh ################'
+echo "ssl-opt.sh will use SEED=$SEED for udp_proxy"
+sh ssl-opt.sh |tee sys-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ ssl-opt.sh ^^^^^^^^^^^^^^^^'
+echo
+
+# Step 2c - Compatibility tests (keep going even if some tests fail)
+echo '################ compat.sh ################'
+{
+    echo '#### compat.sh: Default versions'
+    sh compat.sh
+    echo
+
+    echo '#### compat.sh: null cipher'
+    sh compat.sh -e '^$' -f 'NULL'
+    echo
+
+    echo '#### compat.sh: next (ARIA, ChaCha)'
+    OPENSSL="$OPENSSL_NEXT" sh compat.sh -e '^$' -f 'ARIA\|CHACHA' # requires OPENSSL_NEXT in the environment ('set -u' aborts otherwise)
+    echo
+} | tee compat-test-$TEST_OUTPUT
+echo '^^^^^^^^^^^^^^^^ compat.sh ^^^^^^^^^^^^^^^^'
+echo
+
+# Step 3 - Process the coverage report
+cd ..
+{
+    make lcov
+    echo SUCCESS    # sentinel: tee would otherwise hide a 'make lcov' failure
+} | tee tests/cov-$TEST_OUTPUT
+
+if [ "$(tail -n1 tests/cov-$TEST_OUTPUT)" != "SUCCESS" ]; then
+    echo >&2 "Fatal: 'make lcov' failed"
+    exit 2
+fi
+
+
+# Step 4 - Summarise the test report
+echo
+echo "========================================================================="
+echo "Test Report Summary"
+echo
+
+# A failure of the left-hand side of a pipe is ignored (this is a limitation
+# of sh). We'll use the presence of this file as a marker that the generation
+# of the report succeeded.
+rm -f "tests/basic-build-test-$$.ok"
+
+{
+
+    cd tests
+
+    # Step 4a - Unit tests
+    echo "Unit tests - tests/scripts/run-test-suites.pl"
+
+    PASSED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/test cases passed :[\t]*\([0-9]*\)/\1/p'| tr -d ' ') # parsed from run-test-suites.pl summary lines
+    SKIPPED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/skipped :[ \t]*\([0-9]*\)/\1/p'| tr -d ' ')
+    TOTAL_SUITES=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) .*, [0-9]* tests run)/\1/p'| tr -d ' ')
+    FAILED_TESTS=$(tail -n6 unit-test-$TEST_OUTPUT|sed -n -e 's/failed :[\t]*\([0-9]*\)/\1/p' |tr -d ' ')
+
+    echo "No test suites     : $TOTAL_SUITES"
+    echo "Passed             : $PASSED_TESTS"
+    echo "Failed             : $FAILED_TESTS"
+    echo "Skipped            : $SKIPPED_TESTS"
+    echo "Total exec'd tests : $(($PASSED_TESTS + $FAILED_TESTS))"
+    echo "Total avail tests  : $(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))"
+    echo
+
+    TOTAL_PASS=$PASSED_TESTS
+    TOTAL_FAIL=$FAILED_TESTS
+    TOTAL_SKIP=$SKIPPED_TESTS
+    TOTAL_AVAIL=$(($PASSED_TESTS + $FAILED_TESTS + $SKIPPED_TESTS))
+    TOTAL_EXED=$(($PASSED_TESTS + $FAILED_TESTS))
+
+    # Step 4b - TLS Options tests
+    echo "TLS Options tests - tests/ssl-opt.sh"
+
+    PASSED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p') # parsed from the ssl-opt.sh final summary line
+    SKIPPED_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p')
+    TOTAL_TESTS=$(tail -n5 sys-test-$TEST_OUTPUT|sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p')
+    FAILED_TESTS=$(($TOTAL_TESTS - $PASSED_TESTS))
+
+    echo "Passed             : $PASSED_TESTS"
+    echo "Failed             : $FAILED_TESTS"
+    echo "Skipped            : $SKIPPED_TESTS"
+    echo "Total exec'd tests : $TOTAL_TESTS"
+    echo "Total avail tests  : $(($TOTAL_TESTS + $SKIPPED_TESTS))"
+    echo
+
+    TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
+    TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
+    TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
+    TOTAL_AVAIL=$(($TOTAL_AVAIL + $TOTAL_TESTS + $SKIPPED_TESTS))
+    TOTAL_EXED=$(($TOTAL_EXED + $TOTAL_TESTS))
+
+
+    # Step 4c - System Compatibility tests
+    echo "System/Compatibility tests - tests/compat.sh"
+
+    PASSED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* (\([0-9]*\) \/ [0-9]* tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }') # sum over the several compat.sh runs
+    SKIPPED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ [0-9]* tests (\([0-9]*\) skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
+    EXED_TESTS=$(cat compat-test-$TEST_OUTPUT | sed -n -e 's/.* ([0-9]* \/ \([0-9]*\) tests ([0-9]* skipped))$/\1/p' | awk 'BEGIN{ s = 0 } { s += $1 } END{ print s }')
+    FAILED_TESTS=$(($EXED_TESTS - $PASSED_TESTS))
+
+    echo "Passed             : $PASSED_TESTS"
+    echo "Failed             : $FAILED_TESTS"
+    echo "Skipped            : $SKIPPED_TESTS"
+    echo "Total exec'd tests : $EXED_TESTS"
+    echo "Total avail tests  : $(($EXED_TESTS + $SKIPPED_TESTS))"
+    echo
+
+    TOTAL_PASS=$(($TOTAL_PASS+$PASSED_TESTS))
+    TOTAL_FAIL=$(($TOTAL_FAIL+$FAILED_TESTS))
+    TOTAL_SKIP=$(($TOTAL_SKIP+$SKIPPED_TESTS))
+    TOTAL_AVAIL=$(($TOTAL_AVAIL + $EXED_TESTS + $SKIPPED_TESTS))
+    TOTAL_EXED=$(($TOTAL_EXED + $EXED_TESTS))
+
+
+    # Step 4d - Grand totals
+    echo "-------------------------------------------------------------------------"
+    echo "Total tests"
+
+    echo "Total Passed       : $TOTAL_PASS"
+    echo "Total Failed       : $TOTAL_FAIL"
+    echo "Total Skipped      : $TOTAL_SKIP"
+    echo "Total exec'd tests : $TOTAL_EXED"
+    echo "Total avail tests  : $TOTAL_AVAIL"
+    echo
+
+
+    # Step 4e - Coverage report
+    echo "Coverage statistics:"
+    sed -n '1,/^Overall coverage/d; /%/p' cov-$TEST_OUTPUT # keep only the percentage lines after the "Overall coverage" header
+    echo
+
+    rm unit-test-$TEST_OUTPUT
+    rm sys-test-$TEST_OUTPUT
+    rm compat-test-$TEST_OUTPUT
+    rm cov-$TEST_OUTPUT
+
+    # Mark the report generation as having succeeded. This must be the
+    # last thing in the report generation.
+    touch "basic-build-test-$$.ok"
+} | tee coverage-summary.txt
+
+make clean
+
+if [ -f "$CONFIG_BAK" ]; then
+    mv "$CONFIG_BAK" "$CONFIG_H"
+fi
+
+# The file must exist, otherwise it means something went wrong while generating
+# the coverage report. If something did go wrong, rm will complain so this
+# script will exit with a failure status.
+rm "tests/basic-build-test-$$.ok"
diff --git a/tests/scripts/basic-in-docker.sh b/tests/scripts/basic-in-docker.sh
new file mode 100755
index 0000000..3aca3a1
--- /dev/null
+++ b/tests/scripts/basic-in-docker.sh
@@ -0,0 +1,37 @@
+#!/bin/bash -eu
+
+# basic-in-docker.sh
+#
+# Purpose
+# -------
+# This runs sanity checks and library tests in a Docker container. The tests
+# are run for both clang and gcc. The testing includes a full test run
+# in the default configuration, partial test runs in the reference
+# configurations, and some dependency tests.
+#
+# WARNING: the Dockerfile used by this script is no longer maintained! See
+# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
+# for the set of Docker images we use on the CI.
+#
+# Notes for users
+# ---------------
+# See docker_env.sh for prerequisites and other information.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+source tests/scripts/docker_env.sh
+
+run_in_docker tests/scripts/all.sh 'check_*' # sanity checks only (the 'check_*' components of all.sh)
+
+for compiler in clang gcc; do
+    run_in_docker -e CC=${compiler} cmake -D CMAKE_BUILD_TYPE:String="Check" . # "Check" build type — presumably warnings-as-errors; see CMakeLists
+    run_in_docker -e CC=${compiler} make
+    run_in_docker -e CC=${compiler} make test
+    run_in_docker programs/test/selftest
+    run_in_docker -e OSSL_NO_DTLS=1 tests/compat.sh # OSSL_NO_DTLS=1: skip DTLS interop cases — TODO confirm against compat.sh
+    run_in_docker tests/ssl-opt.sh -e '\(DTLS\|SCSV\).*openssl'
+    run_in_docker tests/scripts/test-ref-configs.pl
+    run_in_docker tests/scripts/depends.py curves
+    run_in_docker tests/scripts/depends.py kex
+done
diff --git a/tests/scripts/check-doxy-blocks.pl b/tests/scripts/check-doxy-blocks.pl
new file mode 100755
index 0000000..3199c2a
--- /dev/null
+++ b/tests/scripts/check-doxy-blocks.pl
@@ -0,0 +1,67 @@
+#!/usr/bin/env perl
+
+# Detect comment blocks that are likely meant to be doxygen blocks but aren't.
+#
+# More precisely, look for normal comment blocks containing '\'.
+# Of course one could use doxygen warnings, e.g. with:
+#   sed -e '/EXTRACT/s/YES/NO/' doxygen/mbedtls.doxyfile | doxygen -
+# but that would warn about any undocumented item, while our goal is to find
+# items that are documented, but not marked as such by mistake.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use warnings;
+use strict;
+use File::Basename;
+
+# C/header files in the following directories will be checked
+my @directories = qw(include/mbedtls library doxygen/input);
+
+# Very naive pattern to find directives:
+# everything with a backslash except '\0' and backslash at EOL
+my $doxy_re = qr/\\(?!0|\n)/;
+
+# Return an error code to the environment if a potential error in the
+# source code is found.
+my $exit_code = 0;
+
+sub check_file {
+    my ($fname) = @_;
+    open my $fh, '<', $fname or die "Failed to open '$fname': $!\n";
+
+    # first line of the last normal comment block,
+    # or 0 if not in a normal comment block
+    my $block_start = 0;
+    while (my $line = <$fh>) {
+        $block_start = $. if $line =~ m/\/\*(?![*!])/; # '/*' not followed by '*' or '!' starts a plain (non-doxygen) comment
+        $block_start = 0 if $line =~ m/\*\//;
+        if ($block_start and $line =~ m/$doxy_re/) {
+            print "$fname:$block_start: directive on line $.\n";
+            $block_start = 0; # report only one directive per block
+            $exit_code = 1;
+        }
+    }
+
+    close $fh;
+}
+
+sub check_dir {
+    my ($dir) = @_;    # directory to scan (non-recursive)
+    foreach my $source (glob("$dir/*.[ch]")) {
+        check_file($source);
+    }
+}
+
+# Ensure we run from the project root (each checked directory must exist), then scan it.
+for my $dir (@directories) {
+    if (! -d $dir) {
+        die "This script must be run from the Mbed TLS root directory";
+    } else {
+        check_dir($dir)
+    }
+}
+
+exit $exit_code;
+
+__END__
diff --git a/tests/scripts/check-generated-files.sh b/tests/scripts/check-generated-files.sh
new file mode 100755
index 0000000..2f20026
--- /dev/null
+++ b/tests/scripts/check-generated-files.sh
@@ -0,0 +1,151 @@
+#! /usr/bin/env sh
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# Check if generated files are up-to-date.
+
+set -eu
+
+# Print the usage text and exit when invoked with --help as the first argument.
+if [ $# -ne 0 ] && [ "$1" = "--help" ]; then
+    cat <<EOF
+$0 [-l | -u]
+This script checks that all generated files are up-to-date. If some aren't, by
+default the script reports it and exits in error; with the -u option, it just
+updates them instead.
+
+  -u  Update the files rather than return an error for out-of-date files.
+  -l  List generated files, but do not update them.
+EOF
+    exit
+fi
+
+in_mbedtls_repo () { # heuristic: directory layout of an Mbed TLS checkout
+    test -d include -a -d library -a -d programs -a -d tests
+}
+
+in_tf_psa_crypto_repo () { # heuristic: directory layout of a TF-PSA-Crypto checkout
+    test -d include -a -d core -a -d drivers -a -d programs -a -d tests
+}
+
+if in_mbedtls_repo; then
+    library_dir='library'
+elif in_tf_psa_crypto_repo; then
+    library_dir='core'
+else
+    echo "Must be run from Mbed TLS root or TF-PSA-Crypto root" >&2
+    exit 1
+fi
+
+UPDATE= # set to 1 by -u: rewrite out-of-date files instead of failing
+LIST= # set to 1 by -l: only print the list of generated files
+while getopts lu OPTLET; do
+    case $OPTLET in
+        l) LIST=1;;
+        u) UPDATE=1;;
+    esac
+done
+
+# check SCRIPT FILENAME[...]
+# check SCRIPT DIRECTORY
+# Run SCRIPT and check that it does not modify any of the specified files.
+# In the first form, there can be any number of FILENAMEs, which must be
+# regular files.
+# In the second form, there must be a single DIRECTORY, standing for the
+# list of files in the directory. Running SCRIPT must not modify any file
+# in the directory and must not add or remove files either.
+# If $UPDATE is empty, abort with an error status if a file is modified.
+check()
+{
+    SCRIPT=$1
+    shift
+
+    if [ -n "$LIST" ]; then
+        printf '%s\n' "$@"
+        return
+    fi
+
+    directory=
+    if [ -d "$1" ]; then
+        directory="$1"
+        rm -f "$directory"/*.bak
+        set -- "$1"/* # replace the argument list with the directory's current contents
+    fi
+
+    # Back up each file so we can compare after running the generator.
+    for FILE in "$@"; do
+        if [ -e "$FILE" ]; then
+            cp -p "$FILE" "$FILE.bak"
+        else
+            rm -f "$FILE.bak"
+        fi
+    done
+
+    "$SCRIPT"
+
+    # Compare the script output to the old files and remove backups
+    for FILE in "$@"; do
+        if diff "$FILE" "$FILE.bak" >/dev/null 2>&1; then
+            # Move the original file back so that $FILE's timestamp doesn't
+            # change (avoids spurious rebuilds with make).
+            mv "$FILE.bak" "$FILE"
+        else
+            echo "'$FILE' was either modified or deleted by '$SCRIPT'"
+            if [ -z "$UPDATE" ]; then
+                exit 1
+            else
+                rm -f "$FILE.bak"
+            fi
+        fi
+    done
+
+    if [ -n "$directory" ]; then
+        old_list="$*"
+        set -- "$directory"/*
+        new_list="$*"
+        # Check if there are any new files
+        if [ "$old_list" != "$new_list" ]; then
+            echo "Files were deleted or created by '$SCRIPT'"
+            echo "Before: $old_list"
+            echo "After: $new_list"
+            if [ -z "$UPDATE" ]; then
+                exit 1
+            fi
+        fi
+    fi
+}
+
+# Note: if the format of calls to the "check" function changes, update
+# scripts/code_style.py accordingly. For generated C source files (*.h or *.c),
+# the format must be "check SCRIPT FILENAME...". For other source files,
+# any shell syntax is permitted (including e.g. command substitution).
+
+# Note: Instructions to generate those files are replicated in:
+#   - **/Makefile (to (re)build them with make)
+#   - **/CMakeLists.txt (to (re)build them with cmake)
+#   - scripts/make_generated_files.bat (to generate them under Windows)
+
+# These checks are common to Mbed TLS and TF-PSA-Crypto
+check scripts/generate_psa_constants.py programs/psa/psa_constant_names_generated.c
+check tests/scripts/generate_bignum_tests.py $(tests/scripts/generate_bignum_tests.py --list) # unquoted on purpose: --list output is a whitespace-separated file list
+check tests/scripts/generate_ecp_tests.py $(tests/scripts/generate_ecp_tests.py --list)
+check tests/scripts/generate_psa_tests.py $(tests/scripts/generate_psa_tests.py --list)
+check scripts/generate_driver_wrappers.py $library_dir/psa_crypto_driver_wrappers.h $library_dir/psa_crypto_driver_wrappers_no_static.c
+
+# Additional checks for Mbed TLS only
+if in_mbedtls_repo; then
+    check scripts/generate_errors.pl library/error.c
+    check scripts/generate_query_config.pl programs/test/query_config.c
+    check scripts/generate_features.pl library/version_features.c
+    check scripts/generate_ssl_debug_helpers.py library/ssl_debug_helpers_generated.c
+    # generate_visualc_files enumerates source files (library/*.c). It doesn't
+    # care about their content, but the files must exist. So it must run after
+    # the step that creates or updates these files.
+    check scripts/generate_visualc_files.pl visualc/VS2017
+fi
+
+# Generated files that are present in the repository even in the development
+# branch. (This is intended to be temporary, until the generator scripts are
+# fully reviewed and the build scripts support a generated header file.)
+check tests/scripts/generate_psa_wrappers.py tests/include/test/psa_test_wrappers.h tests/src/psa_test_wrappers.c
diff --git a/tests/scripts/check-python-files.sh b/tests/scripts/check-python-files.sh
new file mode 100755
index 0000000..51e8079
--- /dev/null
+++ b/tests/scripts/check-python-files.sh
@@ -0,0 +1,68 @@
+#! /usr/bin/env sh
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# Purpose: check Python files for potential programming errors or maintenance
+# hurdles. Run pylint to detect some potential mistakes and enforce PEP8
+# coding standards. Run mypy to perform static type checking.
+
+# We'll keep going on errors and report the status at the end.
+ret=0 # overall exit status: becomes 1 if any checker fails
+
+if type python3 >/dev/null 2>/dev/null; then
+    PYTHON=python3
+else
+    PYTHON=python
+fi
+
+# check_version MODULE MIN_VERSION: succeed iff MODULE is importable
+check_version () {
+    $PYTHON - "$2" <<EOF
+import packaging.version
+import sys
+import $1 as package
+actual = package.__version__
+wanted = sys.argv[1]
+if packaging.version.parse(actual) < packaging.version.parse(wanted):
+    sys.stderr.write("$1: version %s is too old (want %s)\n" % (actual, wanted))
+    exit(1)
+EOF
+}
+
+can_pylint () {
+    # Pylint 1.5.2 from Ubuntu 16.04 is too old:
+    #     E:  34, 0: Unable to import 'mbedtls_dev' (import-error)
+    # Pylint 1.8.3 from Ubuntu 18.04 passed on the first commit containing this line.
+    check_version pylint 1.8.3
+}
+
+can_mypy () {
+    # mypy 0.770 is too old:
+    #     tests/scripts/test_psa_constant_names.py:34: error: Cannot find implementation or library stub for module named 'mbedtls_dev'
+    # mypy 0.780 from pip passed on the first commit containing this line.
+    check_version mypy.version 0.780
+}
+
+# With just a --can-xxx option, check whether the tool for xxx is available
+# with an acceptable version, and exit without running any checks. The exit
+# status is true if the tool is available and acceptable and false otherwise.
+if [ "$1" = "--can-pylint" ]; then
+    can_pylint
+    exit
+elif [ "$1" = "--can-mypy" ]; then
+    can_mypy
+    exit
+fi
+
+echo 'Running pylint ...'
+$PYTHON -m pylint scripts/mbedtls_dev/*.py scripts/*.py tests/scripts/*.py || {
+    echo >&2 "pylint reported errors"
+    ret=1
+}
+
+echo
+echo 'Running mypy ...'
+$PYTHON -m mypy scripts/*.py tests/scripts/*.py ||
+    ret=1
+
+exit $ret
diff --git a/tests/scripts/check_files.py b/tests/scripts/check_files.py
new file mode 100755
index 0000000..d5a4b92
--- /dev/null
+++ b/tests/scripts/check_files.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python3
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script checks the current state of the source code for minor issues,
+including incorrect file permissions, presence of tabs, non-Unix line endings,
+trailing whitespace, and presence of UTF-8 BOM.
+Note: requires python 3, must be run from Mbed TLS root.
+"""
+
+import argparse
+import codecs
+import inspect
+import logging
+import os
+import re
+import subprocess
+import sys
+try:
+ from typing import FrozenSet, Optional, Pattern # pylint: disable=unused-import
+except ImportError:
+ pass
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import build_tree
+
+
+class FileIssueTracker:
+    """Base class for file-wide issue tracking.
+
+    To implement a checker that processes a file as a whole, inherit from
+    this class and implement `check_file_for_issue` and define ``heading``.
+
+    ``suffix_exemptions``: files whose name ends with a string in this set
+    will not be checked.
+
+    ``path_exemptions``: files whose path (relative to the root of the source
+    tree) matches this regular expression will not be checked. This can be
+    ``None`` to match no path. Paths are normalized and converted to ``/``
+    separators before matching.
+
+    ``heading``: human-readable description of the issue
+    """
+
+    suffix_exemptions = frozenset() #type: FrozenSet[str]
+    path_exemptions = None #type: Optional[Pattern[str]]
+    # heading must be defined in derived classes.
+    # pylint: disable=no-member
+
+    def __init__(self):
+        self.files_with_issues = {}  # filepath -> list of line numbers, or None for a whole-file issue
+
+    @staticmethod
+    def normalize_path(filepath):
+        """Normalize ``filepath`` with / as the directory separator."""
+        filepath = os.path.normpath(filepath)
+        # On Windows, we may have backslashes to separate directories.
+        # We need slashes to match exemption lists.
+        seps = os.path.sep
+        if os.path.altsep is not None:
+            seps += os.path.altsep
+        return '/'.join(filepath.split(seps))
+
+    def should_check_file(self, filepath):
+        """Whether the given file name should be checked.
+
+        Files whose name ends with a string listed in ``self.suffix_exemptions``
+        or whose path matches ``self.path_exemptions`` will not be checked.
+        """
+        for files_exemption in self.suffix_exemptions:
+            if filepath.endswith(files_exemption):
+                return False
+        if self.path_exemptions and \
+           re.match(self.path_exemptions, self.normalize_path(filepath)):
+            return False
+        return True
+
+    def check_file_for_issue(self, filepath):
+        """Check the specified file for the issue that this class is for.
+
+        Subclasses must implement this method.
+        """
+        raise NotImplementedError
+
+    def record_issue(self, filepath, line_number):
+        """Record that an issue was found at the specified location."""
+        if filepath not in self.files_with_issues.keys():
+            self.files_with_issues[filepath] = []
+        self.files_with_issues[filepath].append(line_number)
+
+    def output_file_issues(self, logger):
+        """Log all the locations where the issue was found."""
+        if self.files_with_issues.values():
+            logger.info(self.heading)
+            for filename, lines in sorted(self.files_with_issues.items()):
+                if lines:  # line-level issue: print the line numbers
+                    logger.info("{}: {}".format(
+                        filename, ", ".join(str(x) for x in lines)
+                    ))
+                else:  # whole-file issue (lines is None or empty)
+                    logger.info(filename)
+            logger.info("")
+
+BINARY_FILE_PATH_RE_LIST = [  # path regexps of binary (non-text) files, exempt from line checks
+    r'docs/.*\.pdf\Z',
+    r'docs/.*\.png\Z',
+    r'programs/fuzz/corpuses/[^.]+\Z',
+    r'tests/data_files/[^.]+\Z',
+    r'tests/data_files/.*\.(crt|csr|db|der|key|pubkey)\Z',
+    r'tests/data_files/.*\.req\.[^/]+\Z',
+    r'tests/data_files/.*malformed[^/]+\Z',
+    r'tests/data_files/format_pkcs12\.fmt\Z',
+    r'tests/data_files/.*\.bin\Z',
+]
+BINARY_FILE_PATH_RE = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST))  # combined exemption pattern
+
+class LineIssueTracker(FileIssueTracker):
+    """Base class for line-by-line issue tracking.
+
+    To implement a checker that processes files line by line, inherit from
+    this class and implement `line_with_issue`.
+    """
+
+    # Exclude binary files.
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def issue_with_line(self, line, filepath, line_number):
+        """Check the specified line for the issue that this class is for.
+
+        Subclasses must implement this method.
+        """
+        raise NotImplementedError
+
+    def check_file_line(self, filepath, line, line_number):
+        # Record the location if the subclass flags this line.
+        if self.issue_with_line(line, filepath, line_number):
+            self.record_issue(filepath, line_number)
+
+    def check_file_for_issue(self, filepath):
+        """Check the lines of the specified file.
+
+        Subclasses must implement the ``issue_with_line`` method.
+        """
+        with open(filepath, "rb") as f:  # binary mode: checkers see raw bytes
+            for i, line in enumerate(iter(f.readline, b"")):
+                self.check_file_line(filepath, line, i + 1)  # 1-based line numbers
+
+
+def is_windows_file(filepath):
+    _root, ext = os.path.splitext(filepath)
+    return ext in ('.bat', '.dsp', '.dsw', '.sln', '.vcxproj')  # Windows-only file types, where CRLF endings are expected
+
+
+class ShebangIssueTracker(FileIssueTracker):
+    """Track files with a bad, missing or extraneous shebang line.
+
+    Executable scripts must start with a valid shebang (#!) line.
+    """
+
+    heading = "Invalid shebang line:"
+
+    # Allow either /bin/sh, /bin/bash, or /usr/bin/env.
+    # Allow at most one argument (this is a Linux limitation).
+    # For sh and bash, the argument if present must be options.
+    # For env, the argument must be the base name of the interpreter.
+    _shebang_re = re.compile(rb'^#! ?(?:/bin/(bash|sh)(?: -[^\n ]*)?'
+                             rb'|/usr/bin/env ([^\n /]+))$')
+    _extensions = {  # interpreter -> expected file extension
+        b'bash': 'sh',
+        b'perl': 'pl',
+        b'python3': 'py',
+        b'sh': 'sh',
+    }
+
+    path_exemptions = re.compile(r'tests/scripts/quiet/.*')
+
+    def is_valid_shebang(self, first_line, filepath):
+        # Valid iff the shebang names a known interpreter and the file
+        # extension matches that interpreter.
+        m = re.match(self._shebang_re, first_line)
+        if not m:
+            return False
+        interpreter = m.group(1) or m.group(2)
+        if interpreter not in self._extensions:
+            return False
+        if not filepath.endswith('.' + self._extensions[interpreter]):
+            return False
+        return True
+
+    def check_file_for_issue(self, filepath):
+        is_executable = os.access(filepath, os.X_OK)  # executable bit for the current user
+        with open(filepath, "rb") as f:
+            first_line = f.readline()
+        if first_line.startswith(b'#!'):
+            if not is_executable:
+                # Shebang on a non-executable file
+                self.files_with_issues[filepath] = None
+            elif not self.is_valid_shebang(first_line, filepath):
+                self.files_with_issues[filepath] = [1]
+        elif is_executable:
+            # Executable without a shebang
+            self.files_with_issues[filepath] = None
+
+
+class EndOfFileNewlineIssueTracker(FileIssueTracker):
+    """Track files that end with an incomplete line
+    (no newline character at the end of the last line)."""
+
+    heading = "Missing newline at end of file:"
+
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def check_file_for_issue(self, filepath):
+        with open(filepath, "rb") as f:
+            try:
+                f.seek(-1, 2)  # position at the last byte (whence=2: from end)
+            except OSError:
+                # This script only works on regular files. If we can't seek
+                # 1 before the end, it means that this position is before
+                # the beginning of the file, i.e. that the file is empty.
+                return
+            if f.read(1) != b"\n":
+                self.files_with_issues[filepath] = None
+
+
+class Utf8BomIssueTracker(FileIssueTracker):
+    """Track files that start with a UTF-8 BOM.
+    Files should be ASCII or UTF-8. Valid UTF-8 does not start with a BOM."""
+
+    heading = "UTF-8 BOM present:"
+
+    suffix_exemptions = frozenset([".vcxproj", ".sln"])
+    path_exemptions = BINARY_FILE_PATH_RE
+
+    def check_file_for_issue(self, filepath):
+        with open(filepath, "rb") as f:
+            if f.read().startswith(codecs.BOM_UTF8):  # BOM_UTF8 == b'\xef\xbb\xbf'
+                self.files_with_issues[filepath] = None
+
+
+class UnicodeIssueTracker(LineIssueTracker):
+    """Track lines with invalid characters or invalid text encoding."""
+
+    heading = "Invalid UTF-8 or forbidden character:"
+
+    # Only allow valid UTF-8, and only other explicitly allowed characters.
+    # We deliberately exclude all characters that aren't a simple non-blank,
+    # non-zero-width glyph, apart from a very small set (tab, ordinary space,
+    # line breaks, "basic" no-break space and soft hyphen). In particular,
+    # non-ASCII control characters, combining characters, and Unicode state
+    # changes (e.g. right-to-left text) are forbidden.
+    # Note that we do allow some characters with a risk of visual confusion,
+    # for example '-' (U+002D HYPHEN-MINUS) vs '­' (U+00AD SOFT HYPHEN) vs
+    # '‐' (U+2010 HYPHEN), or 'A' (U+0041 LATIN CAPITAL LETTER A) vs
+    # 'Α' (U+0391 GREEK CAPITAL LETTER ALPHA).
+    GOOD_CHARACTERS = ''.join([
+        '\t\n\r -~', # ASCII (tabs and line endings are checked separately)
+        '\u00A0-\u00FF', # Latin-1 Supplement (for NO-BREAK SPACE and punctuation)
+        '\u2010-\u2027\u2030-\u205E', # General Punctuation (printable)
+        '\u2070\u2071\u2074-\u208E\u2090-\u209C', # Superscripts and Subscripts
+        '\u2190-\u21FF', # Arrows
+        '\u2200-\u22FF', # Mathematical Symbols
+        '\u2500-\u257F' # Box Drawings characters used in markdown trees
+    ])
+    # Allow any of the characters and ranges above, and anything classified
+    # as a word constituent.
+    GOOD_CHARACTERS_RE = re.compile(r'[\w{}]+\Z'.format(GOOD_CHARACTERS))
+
+    def issue_with_line(self, line, _filepath, line_number):
+        try:
+            text = line.decode('utf-8')
+        except UnicodeDecodeError:
+            return True
+        if line_number == 1 and text.startswith('\uFEFF'):
+            # Strip BOM (U+FEFF ZERO WIDTH NO-BREAK SPACE) at the beginning.
+            # Which files are allowed to have a BOM is handled in
+            # Utf8BomIssueTracker.
+            text = text[1:]
+        return not self.GOOD_CHARACTERS_RE.match(text)
+
+class UnixLineEndingIssueTracker(LineIssueTracker):
+    """Track files with non-Unix line endings (i.e. files with CR)."""
+
+    heading = "Non-Unix line endings:"
+
+    def should_check_file(self, filepath):
+        if not super().should_check_file(filepath):
+            return False
+        return not is_windows_file(filepath)
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return b"\r" in line  # any CR, bare or in CRLF, counts as non-Unix
+
+
+class WindowsLineEndingIssueTracker(LineIssueTracker):
+    """Track files with non-Windows line endings (i.e. CR or LF not in CRLF)."""
+
+    heading = "Non-Windows line endings:"
+
+    def should_check_file(self, filepath):
+        if not super().should_check_file(filepath):
+            return False
+        return is_windows_file(filepath)
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return not line.endswith(b"\r\n") or b"\r" in line[:-2]  # must end in CRLF with no stray CR earlier
+
+
+class TrailingWhitespaceIssueTracker(LineIssueTracker):
+    """Track lines with trailing whitespace."""
+
+    heading = "Trailing whitespace:"
+    suffix_exemptions = frozenset([".dsp", ".md"])
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return line.rstrip(b"\r\n") != line.rstrip()  # differs iff whitespace precedes the line ending
+
+
+class TabIssueTracker(LineIssueTracker):
+    """Track lines with tabs."""
+
+    heading = "Tabs present:"
+    suffix_exemptions = frozenset([
+        ".make",
+        ".pem", # some openssl dumps have tabs
+        ".sln",
+        "/.gitmodules",
+        "/Makefile",
+        "/Makefile.inc",
+        "/generate_visualc_files.pl",
+    ])
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        return b"\t" in line  # any tab anywhere on the line
+
+
+class MergeArtifactIssueTracker(LineIssueTracker):
+    """Track lines with merge artifacts.
+    These are leftovers from a ``git merge`` that wasn't fully edited."""
+
+    heading = "Merge artifact:"
+
+    def issue_with_line(self, line, _filepath, _line_number):
+        # Detect leftover git conflict markers.
+        if line.startswith(b'<<<<<<< ') or line.startswith(b'>>>>>>> '):
+            return True
+        if line.startswith(b'||||||| '): # from merge.conflictStyle=diff3
+            return True
+        if line.rstrip(b'\r\n') == b'=======' and \
+           not _filepath.endswith('.md'):
+            return True  # ('=======' also underlines Markdown headings, hence the .md exemption)
+        return False
+
+
+def this_location():  # -> (this file's basename, the line number of the caller's frame)
+    frame = inspect.currentframe()
+    assert frame is not None
+    info = inspect.getframeinfo(frame)
+    return os.path.basename(info.filename), info.lineno
+THIS_FILE_BASE_NAME, LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER = this_location()  # captured just above LicenseIssueTracker so it can skip its own code
+
+class LicenseIssueTracker(LineIssueTracker):
+    """Check copyright statements and license indications.
+
+    This class only checks that statements are correct if present. It does
+    not enforce the presence of statements in each file.
+    """
+
+    heading = "License issue:"
+
+    LICENSE_EXEMPTION_RE_LIST = [
+        # Third-party code, other than whitelisted third-party modules,
+        # may be under a different license.
+        r'3rdparty/(?!(p256-m)/.*)',
+        # Documentation explaining the license may have accidental
+        # false positives.
+        r'(ChangeLog|LICENSE|[-0-9A-Z_a-z]+\.md)\Z',
+        # Files imported from TF-M, and not used except in test builds,
+        # may be under a different license.
+        r'configs/ext/crypto_config_profile_medium\.h\Z',
+        r'configs/ext/tfm_mbedcrypto_config_profile_medium\.h\Z',
+        r'configs/ext/README\.md\Z',
+        # Third-party file.
+        r'dco\.txt\Z',
+    ]
+    path_exemptions = re.compile('|'.join(BINARY_FILE_PATH_RE_LIST +
+                                          LICENSE_EXEMPTION_RE_LIST))
+
+    COPYRIGHT_HOLDER = rb'The Mbed TLS Contributors'
+    # Catch "Copyright foo", "Copyright (C) foo", "Copyright © foo", etc.
+    COPYRIGHT_RE = re.compile(rb'.*\bcopyright\s+((?:\w|\s|[()]|[^ -~])*\w)', re.I)
+
+    SPDX_HEADER_KEY = b'SPDX-License-Identifier'
+    LICENSE_IDENTIFIER = b'Apache-2.0 OR GPL-2.0-or-later'
+    SPDX_RE = re.compile(br'.*?(' +
+                         re.escape(SPDX_HEADER_KEY) +
+                         br')(:\s*(.*?)\W*\Z|.*)', re.I)
+
+    LICENSE_MENTION_RE = re.compile(rb'.*(?:' + rb'|'.join([
+        rb'Apache License',
+        rb'General Public License',
+    ]) + rb')', re.I)
+
+    def __init__(self):
+        super().__init__()
+        # Record what problem was caused. We can't easily report it due to
+        # the structure of the script. To be fixed after
+        # https://github.com/Mbed-TLS/mbedtls/pull/2506
+        self.problem = None  # human-readable description of the last issue found
+
+    def issue_with_line(self, line, filepath, line_number):
+        #pylint: disable=too-many-return-statements
+
+        # Use endswith() rather than the more correct os.path.basename()
+        # because experimentally, it makes a significant difference to
+        # the running time.
+        if filepath.endswith(THIS_FILE_BASE_NAME) and \
+           line_number > LINE_NUMBER_BEFORE_LICENSE_ISSUE_TRACKER:
+            # Avoid false positives from the code in this class.
+            # Also skip the rest of this file, which is highly unlikely to
+            # contain any problematic statements since we put those near the
+            # top of files.
+            return False
+
+        m = self.COPYRIGHT_RE.match(line)
+        if m and m.group(1) != self.COPYRIGHT_HOLDER:
+            self.problem = 'Invalid copyright line'
+            return True
+
+        m = self.SPDX_RE.match(line)
+        if m:
+            if m.group(1) != self.SPDX_HEADER_KEY:
+                self.problem = 'Misspelled ' + self.SPDX_HEADER_KEY.decode()
+                return True
+            if not m.group(3):
+                self.problem = 'Improperly formatted SPDX license identifier'
+                return True
+            if m.group(3) != self.LICENSE_IDENTIFIER:
+                self.problem = 'Wrong SPDX license identifier'
+                return True
+
+        m = self.LICENSE_MENTION_RE.match(line)
+        if m:
+            self.problem = 'Suspicious license mention'
+            return True
+
+        return False
+
+
class IntegrityChecker:
    """Sanity-check files under the current directory."""

    def __init__(self, log_file):
        """Set up a checker over the files under the current directory.

        Issues are reported to log_file if given, to stderr otherwise.
        """
        build_tree.check_repo_path()
        self.logger = None
        self.setup_logger(log_file)
        # One tracker instance per category of issue to look for.
        self.issues_to_check = [
            ShebangIssueTracker(),
            EndOfFileNewlineIssueTracker(),
            Utf8BomIssueTracker(),
            UnicodeIssueTracker(),
            UnixLineEndingIssueTracker(),
            WindowsLineEndingIssueTracker(),
            TrailingWhitespaceIssueTracker(),
            TabIssueTracker(),
            MergeArtifactIssueTracker(),
            LicenseIssueTracker(),
        ]

    def setup_logger(self, log_file, level=logging.INFO):
        """Log to log_file if provided, or to stderr if None."""
        self.logger = logging.getLogger()
        self.logger.setLevel(level)
        if log_file:
            self.logger.addHandler(logging.FileHandler(log_file))
        else:
            self.logger.addHandler(logging.StreamHandler())

    @staticmethod
    def collect_files():
        """Return the list of files to check.

        These are the regular files committed into Git.
        """
        listing = subprocess.check_output(['git', 'ls-files', '-z'])
        raw_paths = listing.split(b'\0')[:-1]
        paths = [raw.decode('ascii') for raw in raw_paths]
        # Git normally only lists files, but submodules show up as
        # directories in 'git ls-files' output. Skip submodules (and any
        # other non-regular entries).
        paths = [path for path in paths if os.path.isfile(path)]
        # Give top-level files a directory part ('./x') so that substring
        # matches like `'/Makefile' in fp` behave the same at every depth.
        return [path if os.path.dirname(path)
                else os.path.join(os.curdir, path)
                for path in paths]

    def check_files(self):
        """Run every tracker over every collected file."""
        for tracker in self.issues_to_check:
            for filepath in self.collect_files():
                if tracker.should_check_file(filepath):
                    tracker.check_file_for_issue(filepath)

    def output_issues(self):
        """Log the issues found and their locations.

        Return 1 if there were issues, 0 otherwise.
        """
        found_any = False
        for tracker in self.issues_to_check:
            if tracker.files_with_issues:
                found_any = True
                tracker.output_file_issues(self.logger)
        return 1 if found_any else 0
+
+
def run_main():
    """Parse command-line options, run all checks, and exit accordingly."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-l", "--log_file", type=str, help="path to optional output log",
    )
    check_args = parser.parse_args()
    checker = IntegrityChecker(check_args.log_file)
    checker.check_files()
    # Exit status 1 when any issue was found, 0 otherwise.
    sys.exit(checker.output_issues())


if __name__ == "__main__":
    run_main()
diff --git a/tests/scripts/check_names.py b/tests/scripts/check_names.py
new file mode 100755
index 0000000..9e8ed21
--- /dev/null
+++ b/tests/scripts/check_names.py
@@ -0,0 +1,965 @@
+#!/usr/bin/env python3
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script confirms that the naming of all symbols and identifiers in Mbed TLS
+is consistent with the house style and is also self-consistent. It only runs
+on Linux and macOS since it depends on nm.
+
+It contains two major Python classes, CodeParser and NameChecker. They both have
+a comprehensive "run-all" function (comprehensive_parse() and perform_checks())
+but the individual functions can also be used for specific needs.
+
+CodeParser makes heavy use of regular expressions to parse the code, and is
+dependent on the current code formatting. Many Python C parser libraries require
+preprocessed C code, which means no macro parsing. Compiler tools are also not
+very helpful when we want the exact location in the original source (which
+becomes impossible when e.g. comments are stripped).
+
+NameChecker performs the following checks:
+
+- All exported and available symbols in the library object files, are explicitly
+ declared in the header files. This uses the nm command.
+- All macros, constants, and identifiers (function names, struct names, etc)
+ follow the required regex pattern.
+- Typo checking: All words that begin with MBED|PSA exist as macros or constants.
+
+The script returns 0 on success, 1 on test failure, and 2 if there is a script
+error. It must be run from Mbed TLS root.
+"""
+
+import abc
+import argparse
+import fnmatch
+import glob
+import textwrap
+import os
+import sys
+import traceback
+import re
+import enum
+import shutil
+import subprocess
+import logging
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import build_tree
+
+
# Naming patterns to check against. These are defined outside the NameCheck
# class for ease of modification.
# Public macros: MBEDTLS_/PSA_ prefix, upper-case letters, digits and
# underscores, ending in a digit or upper-case letter (no trailing '_').
PUBLIC_MACRO_PATTERN = r"^(MBEDTLS|PSA)_[0-9A-Z_]*[0-9A-Z]$"
# Internal macros: any case is allowed, but the name must end in a digit
# or upper-case letter (no trailing '_' or lower-case letter).
INTERNAL_MACRO_PATTERN = r"^[0-9A-Za-z_]*[0-9A-Z]$"
# Enum constants follow the same convention as public macros.
CONSTANTS_PATTERN = PUBLIC_MACRO_PATTERN
# Functions/types: mbedtls_/psa_ prefix, lower-case letters, digits and
# underscores, ending in a digit or lower-case letter.
IDENTIFIER_PATTERN = r"^(mbedtls|psa)_[0-9a-z_]*[0-9a-z]$"
+
class Match(): # pylint: disable=too-few-public-methods
    """
    A single name found in the source, with enough context to report it.

    Fields:
    * filename: the file that the match was in.
    * line: the full line containing the match.
    * line_no: the line number.
    * pos: a tuple of (start, end) positions on the line where the match is.
    * name: the match itself.
    """
    def __init__(self, filename, line, line_no, pos, name):
        # pylint: disable=too-many-arguments
        self.filename = filename
        self.line = line
        self.line_no = line_no
        self.pos = pos
        self.name = name

    def __str__(self):
        """
        Render the offending line as a code listing with a caret underline.
        """
        gutter = format(self.line_no, "4d")
        pad = " " * len(gutter)
        start, end = self.pos
        underline = " " * start + "^" * (end - start)

        return (
            " {0} |\n".format(pad) +
            " {0} | {1}".format(gutter, self.line) +
            " {0} | {1}\n".format(pad, underline)
        )
+
class Problem(abc.ABC): # pylint: disable=too-few-public-methods
    """
    Abstract base class for the static-analysis findings this script
    reports. Being an ABC, it cannot be instantiated directly and
    mandates that subclasses implement both output flavours.
    """
    # Class variable to control the quietness of all problems
    quiet = False

    def __init__(self):
        wrapper = textwrap.TextWrapper()
        wrapper.width = 80
        wrapper.initial_indent = "    > "
        wrapper.subsequent_indent = "      "
        self.textwrapper = wrapper

    def __str__(self):
        """
        Dispatch to the quiet or verbose renderer per the class-wide flag.
        """
        return self.quiet_output() if self.__class__.quiet \
            else self.verbose_output()

    @abc.abstractmethod
    def quiet_output(self):
        """
        The output when --quiet is enabled.
        """

    @abc.abstractmethod
    def verbose_output(self):
        """
        The default output with explanation and code snippet if appropriate.
        """
+
class SymbolNotInHeader(Problem): # pylint: disable=too-few-public-methods
    """
    A finding for a symbol that nm reports as exported/available from the
    object files but that no header file declares. Created with
    NameCheck.check_symbols_declared_in_header()

    Fields:
    * symbol_name: the name of the symbol.
    """
    def __init__(self, symbol_name):
        self.symbol_name = symbol_name
        super().__init__()

    def quiet_output(self):
        return "{0}".format(self.symbol_name)

    def verbose_output(self):
        return self.textwrapper.fill(
            "'{0}' was found as an available symbol in the output of nm, "
            "however it was not declared in any header files."
            .format(self.symbol_name))
+
class PatternMismatch(Problem): # pylint: disable=too-few-public-methods
    """
    A finding for a name that does not conform to its expected regex
    pattern. Created with NameCheck.check_match_pattern()

    Fields:
    * pattern: the expected regex pattern
    * match: the Match object in question
    """
    def __init__(self, pattern, match):
        self.pattern = pattern
        self.match = match
        super().__init__()

    def quiet_output(self):
        location = self.match
        return (
            "{0}:{1}:{2}"
            .format(location.filename, location.line_no, location.name)
        )

    def verbose_output(self):
        explanation = self.textwrapper.fill(
            "{0}:{1}: '{2}' does not match the required pattern '{3}'."
            .format(
                self.match.filename,
                self.match.line_no,
                self.match.name,
                self.pattern
            )
        )
        return explanation + "\n" + str(self.match)
+
class Typo(Problem): # pylint: disable=too-few-public-methods
    """
    A finding for a word starting with MBED or PSA that is not defined as
    any macro or enum constant — most likely a typo. Created with
    NameCheck.check_for_typos()

    Fields:
    * match: the Match object of the MBED|PSA name in question.
    """
    def __init__(self, match):
        self.match = match
        super().__init__()

    def quiet_output(self):
        location = self.match
        return (
            "{0}:{1}:{2}"
            .format(location.filename, location.line_no, location.name)
        )

    def verbose_output(self):
        explanation = self.textwrapper.fill(
            "{0}:{1}: '{2}' looks like a typo. It was not found in any "
            "macros or any enums. If this is not a typo, put "
            "//no-check-names after it."
            .format(self.match.filename, self.match.line_no, self.match.name)
        )
        return explanation + "\n" + str(self.match)
+
class CodeParser():
    """
    Class for retrieving files and parsing the code. This can be used
    independently of the checks that NameChecker performs, for example for
    list_internal_identifiers.py.
    """
    def __init__(self, log):
        # log: a logging.Logger used for all progress/diagnostic output.
        self.log = log
        # Aborts early unless run from the Mbed TLS root directory.
        build_tree.check_repo_path()

        # Memo for storing "glob expression": set(filepaths)
        self.files = {}

        # Globally excluded filenames.
        # Note that "*" can match directory separators in exclude lists.
        self.excluded_files = ["*/bn_mul", "*/compat-2.x.h"]

    def comprehensive_parse(self):
        """
        Comprehensive ("default") function to call each parsing function and
        retrieve various elements of the code, together with the source location.

        Returns a dict of parsed item key to the corresponding List of Matches.
        """
        self.log.info("Parsing source code...")
        self.log.debug(
            "The following files are excluded from the search: {}"
            .format(str(self.excluded_files))
        )

        all_macros = {"public": [], "internal": [], "private":[]}
        all_macros["public"] = self.parse_macros([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ])
        all_macros["internal"] = self.parse_macros([
            "library/*.h",
            "tests/include/test/drivers/*.h",
        ])
        all_macros["private"] = self.parse_macros([
            "library/*.c",
        ])
        enum_consts = self.parse_enum_consts([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "library/*.c",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ])
        identifiers, excluded_identifiers = self.parse_identifiers([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h"
        ], ["3rdparty/p256-m/p256-m/p256-m.h"])
        mbed_psa_words = self.parse_mbed_psa_words([
            "include/mbedtls/*.h",
            "include/psa/*.h",
            "library/*.h",
            "3rdparty/everest/include/everest/everest.h",
            "3rdparty/everest/include/everest/x25519.h",
            "library/*.c",
            "3rdparty/everest/library/everest.c",
            "3rdparty/everest/library/x25519.c"
        ], ["library/psa_crypto_driver_wrappers.h"])
        symbols = self.parse_symbols()

        # Remove identifier macros like mbedtls_printf or mbedtls_calloc
        identifiers_justname = [x.name for x in identifiers]
        actual_macros = {"public": [], "internal": []}
        for scope in actual_macros:
            for macro in all_macros[scope]:
                if macro.name not in identifiers_justname:
                    actual_macros[scope].append(macro)

        self.log.debug("Found:")
        # Aligns the counts on the assumption that none exceeds 4 digits
        for scope in actual_macros:
            self.log.debug("  {:4} Total {} Macros"
                           .format(len(all_macros[scope]), scope))
            self.log.debug("  {:4} {} Non-identifier Macros"
                           .format(len(actual_macros[scope]), scope))
        self.log.debug("  {:4} Enum Constants".format(len(enum_consts)))
        self.log.debug("  {:4} Identifiers".format(len(identifiers)))
        self.log.debug("  {:4} Exported Symbols".format(len(symbols)))
        return {
            "public_macros": actual_macros["public"],
            "internal_macros": actual_macros["internal"],
            "private_macros": all_macros["private"],
            "enum_consts": enum_consts,
            "identifiers": identifiers,
            "excluded_identifiers": excluded_identifiers,
            "symbols": symbols,
            "mbed_psa_words": mbed_psa_words
        }

    def is_file_excluded(self, path, exclude_wildcards):
        """Whether the given file path is excluded."""
        # exclude_wildcards may be None. Also, consider the global exclusions.
        exclude_wildcards = (exclude_wildcards or []) + self.excluded_files
        for pattern in exclude_wildcards:
            if fnmatch.fnmatch(path, pattern):
                return True
        return False

    def get_all_files(self, include_wildcards, exclude_wildcards):
        """
        Get all files that match any of the included UNIX-style wildcards
        and filter them into included and excluded lists.
        While the check_names script is designed only for use on UNIX/macOS
        (due to nm), this function alone will work fine on Windows even with
        forward slashes in the wildcard.

        Args:
        * include_wildcards: a List of shell-style wildcards to match filepaths.
        * exclude_wildcards: a List of shell-style wildcards to exclude.

        Returns:
        * inc_files: A List of relative filepaths for included files.
        * exc_files: A List of relative filepaths for excluded files.
        """
        accumulator = set()
        # Glob over the union so that excluded files are also listed
        # (they end up in exc_files rather than being dropped).
        all_wildcards = include_wildcards + (exclude_wildcards or [])
        for wildcard in all_wildcards:
            accumulator = accumulator.union(glob.iglob(wildcard))

        inc_files = []
        exc_files = []
        for path in accumulator:
            if self.is_file_excluded(path, exclude_wildcards):
                exc_files.append(path)
            else:
                inc_files.append(path)
        return (inc_files, exc_files)

    def get_included_files(self, include_wildcards, exclude_wildcards):
        """
        Get all files that match any of the included UNIX-style wildcards.
        While the check_names script is designed only for use on UNIX/macOS
        (due to nm), this function alone will work fine on Windows even with
        forward slashes in the wildcard.

        Args:
        * include_wildcards: a List of shell-style wildcards to match filepaths.
        * exclude_wildcards: a List of shell-style wildcards to exclude.

        Returns a List of relative filepaths.
        """
        accumulator = set()

        for include_wildcard in include_wildcards:
            accumulator = accumulator.union(glob.iglob(include_wildcard))

        return list(path for path in accumulator
                    if not self.is_file_excluded(path, exclude_wildcards))

    def parse_macros(self, include, exclude=None):
        """
        Parse all macros defined by #define preprocessor directives.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for the found macros.
        """
        macro_regex = re.compile(r"# *define +(?P<macro>\w+)")
        # Macro name prefixes that are never of interest (compiler
        # keywords, third-party artifacts, bignum helper fragments).
        exclusions = (
            "asm", "inline", "EMIT", "_CRT_SECURE_NO_DEPRECATE", "MULADDC_"
        )

        files = self.get_included_files(include, exclude)
        self.log.debug("Looking for macros in {} files".format(len(files)))

        macros = []
        for header_file in files:
            with open(header_file, "r", encoding="utf-8") as header:
                # NOTE(review): enumerate() is 0-based, so Match.line_no is
                # one less than the usual 1-based editor line number. The
                # same convention is used throughout this class — confirm
                # whether this is intended before changing it anywhere.
                for line_no, line in enumerate(header):
                    for macro in macro_regex.finditer(line):
                        if macro.group("macro").startswith(exclusions):
                            continue

                        macros.append(Match(
                            header_file,
                            line,
                            line_no,
                            macro.span("macro"),
                            macro.group("macro")))

        return macros

    def parse_mbed_psa_words(self, include, exclude=None):
        """
        Parse all words in the file that begin with MBED|PSA, in and out of
        macros, comments, anything.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for words beginning with MBED|PSA.
        """
        # Typos of TLS are common, hence the broader check below than MBEDTLS.
        mbed_regex = re.compile(r"\b(MBED.+?|PSA)_[A-Z0-9_]*")
        # Lines carrying a no-check-names marker or an #error directive
        # are skipped wholesale.
        exclusions = re.compile(r"// *no-check-names|#error")

        files = self.get_included_files(include, exclude)
        self.log.debug(
            "Looking for MBED|PSA words in {} files"
            .format(len(files))
        )

        mbed_psa_words = []
        for filename in files:
            with open(filename, "r", encoding="utf-8") as fp:
                for line_no, line in enumerate(fp):
                    if exclusions.search(line):
                        continue

                    for name in mbed_regex.finditer(line):
                        mbed_psa_words.append(Match(
                            filename,
                            line,
                            line_no,
                            name.span(0),
                            name.group(0)))

        return mbed_psa_words

    def parse_enum_consts(self, include, exclude=None):
        """
        Parse all enum value constants that are declared.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns a List of Match objects for the findings.
        """
        files = self.get_included_files(include, exclude)
        self.log.debug("Looking for enum consts in {} files".format(len(files)))

        # Emulate a finite state machine to parse enum declarations.
        # OUTSIDE_KEYWORD = outside the enum keyword
        # IN_BRACES = inside enum opening braces
        # IN_BETWEEN = between enum keyword and opening braces
        states = enum.Enum("FSM", ["OUTSIDE_KEYWORD", "IN_BRACES", "IN_BETWEEN"])
        enum_consts = []
        for header_file in files:
            state = states.OUTSIDE_KEYWORD
            with open(header_file, "r", encoding="utf-8") as header:
                for line_no, line in enumerate(header):
                    # Match typedefs and brackets only when they are at the
                    # beginning of the line -- if they are indented, they might
                    # be sub-structures within structs, etc.
                    optional_c_identifier = r"([_a-zA-Z][_a-zA-Z0-9]*)?"
                    if (state == states.OUTSIDE_KEYWORD and
                            re.search(r"^(typedef +)?enum " + \
                                    optional_c_identifier + \
                                    r" *{", line)):
                        state = states.IN_BRACES
                    elif (state == states.OUTSIDE_KEYWORD and
                          re.search(r"^(typedef +)?enum", line)):
                        state = states.IN_BETWEEN
                    elif (state == states.IN_BETWEEN and
                          re.search(r"^{", line)):
                        state = states.IN_BRACES
                    elif (state == states.IN_BRACES and
                          re.search(r"^}", line)):
                        state = states.OUTSIDE_KEYWORD
                    elif (state == states.IN_BRACES and
                          not re.search(r"^ *#", line)):
                        # Inside the braces: the first word on a
                        # non-preprocessor line is an enumerator name.
                        enum_const = re.search(r"^ *(?P<enum_const>\w+)", line)
                        if not enum_const:
                            continue

                        enum_consts.append(Match(
                            header_file,
                            line,
                            line_no,
                            enum_const.span("enum_const"),
                            enum_const.group("enum_const")))

        return enum_consts

    # Spans that must be ignored when scanning a line for identifiers:
    # one-line block comments, line comments, and string literals
    # (the named group lets the substitution keep an empty "" in place).
    IGNORED_CHUNK_REGEX = re.compile('|'.join([
        r'/\*.*?\*/', # block comment entirely on one line
        r'//.*', # line comment
        r'(?P<string>")(?:[^\\\"]|\\.)*"', # string literal
    ]))

    def strip_comments_and_literals(self, line, in_block_comment):
        """Strip comments and string literals from line.

        Continuation lines are not supported.

        If in_block_comment is true, assume that the line starts inside a
        block comment.

        Return updated values of (line, in_block_comment) where:
        * Comments in line have been replaced by a space (or nothing at the
          start or end of the line).
        * String contents have been removed.
        * in_block_comment indicates whether the line ends inside a block
          comment that continues on the next line.
        """

        # Terminate current multiline comment?
        if in_block_comment:
            m = re.search(r"\*/", line)
            if m:
                in_block_comment = False
                line = line[m.end(0):]
            else:
                return '', True

        # Remove full comments and string literals.
        # Do it all together to handle cases like "/*" correctly.
        # Note that continuation lines are not supported.
        line = re.sub(self.IGNORED_CHUNK_REGEX,
                      lambda s: '""' if s.group('string') else ' ',
                      line)

        # Start an unfinished comment?
        # (If `/*` was part of a complete comment, it's already been removed.)
        m = re.search(r"/\*", line)
        if m:
            in_block_comment = True
            line = line[:m.start(0)]

        return line, in_block_comment

    IDENTIFIER_REGEX = re.compile('|'.join([
        # Match " something(a" or " *something(a". Functions.
        # Assumptions:
        # - function definition from return type to one of its arguments is
        #   all on one line
        # - function definition line only contains alphanumeric, asterisk,
        #   underscore, and open bracket
        r".* \**(\w+) *\( *\w",
        # Match "(*something)(".
        r".*\( *\* *(\w+) *\) *\(",
        # Match names of named data structures.
        r"(?:typedef +)?(?:struct|union|enum) +(\w+)(?: *{)?$",
        # Match names of typedef instances, after closing bracket.
        r"}? *(\w+)[;[].*",
    ]))
    # The regex below is indented for clarity.
    EXCLUSION_LINES = re.compile("|".join([
        r"extern +\"C\"",
        r"(typedef +)?(struct|union|enum)( *{)?$",
        r"} *;?$",
        r"$",
        r"//",
        r"#",
    ]))

    def parse_identifiers_in_file(self, header_file, identifiers):
        """
        Parse all lines of a header where a function/enum/struct/union/typedef
        identifier is declared, based on some regex and heuristics. Highly
        dependent on formatting style.

        Append found matches to the list ``identifiers``.
        """

        with open(header_file, "r", encoding="utf-8") as header:
            in_block_comment = False
            # The previous line variable is used for concatenating lines
            # when identifiers are formatted and spread across multiple
            # lines.
            previous_line = ""

            for line_no, line in enumerate(header):
                line, in_block_comment = \
                    self.strip_comments_and_literals(line, in_block_comment)

                if self.EXCLUSION_LINES.match(line):
                    previous_line = ""
                    continue

                # If the line contains only space-separated alphanumeric
                # characters (or underscore, asterisk, or open parenthesis),
                # and nothing else, high chance it's a declaration that
                # continues on the next line
                if re.search(r"^([\w\*\(]+\s+)+$", line):
                    previous_line += line
                    continue

                # If previous line seemed to start an unfinished declaration
                # (as above), concat and treat them as one.
                if previous_line:
                    line = previous_line.strip() + " " + line.strip() + "\n"
                    previous_line = ""

                # Skip parsing if line has a space in front = heuristic to
                # skip function argument lines (highly subject to formatting
                # changes)
                if line[0] == " ":
                    continue

                identifier = self.IDENTIFIER_REGEX.search(line)

                if not identifier:
                    continue

                # Find the group that matched, and append it
                for group in identifier.groups():
                    if not group:
                        continue

                    identifiers.append(Match(
                        header_file,
                        line,
                        line_no,
                        identifier.span(),
                        group))

    def parse_identifiers(self, include, exclude=None):
        """
        Parse all lines of a header where a function/enum/struct/union/typedef
        identifier is declared, based on some regex and heuristics. Highly
        dependent on formatting style. Identifiers in excluded files are still
        parsed.

        Args:
        * include: A List of glob expressions to look for files through.
        * exclude: A List of glob expressions for excluding files.

        Returns: a Tuple of two Lists of Match objects with identifiers.
        * included_identifiers: A List of Match objects with identifiers from
          included files.
        * excluded_identifiers: A List of Match objects with identifiers from
          excluded files.
        """

        included_files, excluded_files = \
            self.get_all_files(include, exclude)

        self.log.debug("Looking for included identifiers in {} files".format \
            (len(included_files)))

        included_identifiers = []
        excluded_identifiers = []
        for header_file in included_files:
            self.parse_identifiers_in_file(header_file, included_identifiers)
        for header_file in excluded_files:
            self.parse_identifiers_in_file(header_file, excluded_identifiers)

        return (included_identifiers, excluded_identifiers)

    def parse_symbols(self):
        """
        Compile the Mbed TLS libraries, and parse the TLS, Crypto, and x509
        object files using nm to retrieve the list of referenced symbols.
        Exceptions thrown here are rethrown because they would be critical
        errors that void several tests, and thus need to halt the program. This
        is explicitly done for clarity.

        Returns a List of unique symbols defined and used in the libraries.
        """
        self.log.info("Compiling...")
        symbols = []

        # Back up the config and atomically compile with the full configuration.
        shutil.copy(
            "include/mbedtls/mbedtls_config.h",
            "include/mbedtls/mbedtls_config.h.bak"
        )
        try:
            # Use check=True in all subprocess calls so that failures are raised
            # as exceptions and logged.
            subprocess.run(
                ["python3", "scripts/config.py", "full"],
                universal_newlines=True,
                check=True
            )
            my_environment = os.environ.copy()
            my_environment["CFLAGS"] = "-fno-asynchronous-unwind-tables"
            # Run make clean separately to lib to prevent unwanted behavior when
            # make is invoked with parallelism.
            subprocess.run(
                ["make", "clean"],
                universal_newlines=True,
                check=True
            )
            subprocess.run(
                ["make", "lib"],
                env=my_environment,
                universal_newlines=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True
            )

            # Perform object file analysis using nm
            symbols = self.parse_symbols_from_nm([
                "library/libmbedcrypto.a",
                "library/libmbedtls.a",
                "library/libmbedx509.a"
            ])

            subprocess.run(
                ["make", "clean"],
                universal_newlines=True,
                check=True
            )
        except subprocess.CalledProcessError as error:
            self.log.debug(error.output)
            raise error
        finally:
            # Put back the original config regardless of there being errors.
            # Works also for keyboard interrupts.
            shutil.move(
                "include/mbedtls/mbedtls_config.h.bak",
                "include/mbedtls/mbedtls_config.h"
            )

        return symbols

    def parse_symbols_from_nm(self, object_files):
        """
        Run nm to retrieve the list of referenced symbols in each object file.
        Does not return the position data since it is of no use.

        Args:
        * object_files: a List of compiled object filepaths to search through.

        Returns a List of unique symbols defined and used in any of the object
        files.
        """
        # Lines to ignore: undefined symbols ("U"), blank lines, and the
        # per-file banner lines produced by nm -o.
        nm_undefined_regex = re.compile(r"^\S+: +U |^$|^\S+:$")
        nm_valid_regex = re.compile(r"^\S+( [0-9A-Fa-f]+)* . _*(?P<symbol>\w+)")
        # Symbols from the bundled Everest third-party code are not checked.
        exclusions = ("FStar", "Hacl")

        symbols = []

        # Gather all outputs of nm
        nm_output = ""
        for lib in object_files:
            nm_output += subprocess.run(
                ["nm", "-og", lib],
                universal_newlines=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                check=True
            ).stdout

        for line in nm_output.splitlines():
            if not nm_undefined_regex.search(line):
                symbol = nm_valid_regex.search(line)
                if (symbol and not symbol.group("symbol").startswith(exclusions)):
                    symbols.append(symbol.group("symbol"))
                else:
                    # Unexpected nm output format; surface it for debugging.
                    self.log.error(line)

        return symbols
+
class NameChecker():
    """
    Runs the individual naming checks over the results produced by
    CodeParser and reports a final verdict.
    """
    def __init__(self, parse_result, log):
        # parse_result: dict as returned by CodeParser.comprehensive_parse().
        self.parse_result = parse_result
        self.log = log

    def perform_checks(self, quiet=False):
        """
        Run every check in sequence and log a final verdict.

        Args:
        * quiet: whether to hide detailed problem explanation.

        Returns 0 when all checks pass, 1 otherwise.
        """
        self.log.info("=============")
        Problem.quiet = quiet
        problems = 0
        problems += self.check_symbols_declared_in_header()

        for group, check_pattern in (
                ("public_macros", PUBLIC_MACRO_PATTERN),
                ("internal_macros", INTERNAL_MACRO_PATTERN),
                ("enum_consts", CONSTANTS_PATTERN),
                ("identifiers", IDENTIFIER_PATTERN)):
            problems += self.check_match_pattern(group, check_pattern)

        problems += self.check_for_typos()

        self.log.info("=============")
        if problems > 0:
            self.log.info("FAIL: {0} problem(s) to fix".format(str(problems)))
            if quiet:
                self.log.info("Remove --quiet to see explanations.")
            else:
                self.log.info("Use --quiet for minimal output.")
            return 1
        self.log.info("PASS")
        return 0

    def check_symbols_declared_in_header(self):
        """
        Check that every symbol detected in the library object files is
        declared in some parsed header (exempted headers included).
        Assumes parse_names_in_source() was called before this.

        Returns the number of problems that need fixing.
        """
        declared_names = {
            identifier.name
            for identifier in
            self.parse_result["identifiers"] +
            self.parse_result["excluded_identifiers"]
        }
        problems = [SymbolNotInHeader(symbol)
                    for symbol in self.parse_result["symbols"]
                    if symbol not in declared_names]

        self.output_check_result("All symbols in header", problems)
        return len(problems)

    def check_match_pattern(self, group_to_check, check_pattern):
        """
        Check that every item of a parse-result group conforms to a regex
        pattern. Assumes parse_names_in_source() was called before this.

        Args:
        * group_to_check: string key to index into self.parse_result.
        * check_pattern: the regex to check against.

        Returns the number of problems that need fixing.
        """
        problems = []

        for candidate in self.parse_result[group_to_check]:
            if not re.search(check_pattern, candidate.name):
                problems.append(PatternMismatch(check_pattern, candidate))
            # Double underscores are reserved and rejected regardless of
            # the group's own pattern.
            if re.search(r".*__.*", candidate.name):
                problems.append(
                    PatternMismatch("no double underscore allowed", candidate))

        self.output_check_result(
            "Naming patterns of {}".format(group_to_check),
            problems)
        return len(problems)

    def check_for_typos(self):
        """
        Check that every word beginning with MBED|PSA is defined as a macro
        or an enum constant. Assumes parse_names_in_source() was called
        before this.

        Returns the number of problems that need fixing.
        """
        known_names = {
            match.name
            for match in
            self.parse_result["public_macros"] +
            self.parse_result["internal_macros"] +
            self.parse_result["private_macros"] +
            self.parse_result["enum_consts"]
        }
        typo_exclusion = re.compile(r"XXX|__|_$|^MBEDTLS_.*CONFIG_FILE$|"
                                    r"MBEDTLS_TEST_LIBTESTDRIVER*|"
                                    r"PSA_CRYPTO_DRIVER_TEST")

        problems = []
        for name_match in self.parse_result["mbed_psa_words"]:
            name = name_match.name
            # MBEDTLS_PSA_ACCEL_XXX defines come from the PSA driver and do
            # not exist as macros here; validate them through their
            # MBEDTLS_PSA_BUILTIN_ counterparts instead.
            if "MBEDTLS_PSA_ACCEL_" in name:
                found = name.replace(
                    "MBEDTLS_PSA_ACCEL_",
                    "MBEDTLS_PSA_BUILTIN_") in known_names
            else:
                found = name in known_names

            if not found and not typo_exclusion.search(name):
                problems.append(Typo(name_match))

        self.output_check_result("Likely typos", problems)
        return len(problems)

    def output_check_result(self, name, problems):
        """
        Write out the PASS/FAIL status of a performed check depending on
        whether there were problems.

        Args:
        * name: the name of the test
        * problems: a List of encountered Problems
        """
        if not problems:
            self.log.info("{}: PASS".format(name))
            return
        self.log.info("{}: FAIL\n".format(name))
        for problem in problems:
            self.log.warning(str(problem))
+
def main():
    """
    Parse arguments, then run CodeParser and NameChecker to completion.

    Exits with 0 on success, 1 on check failure, 2 on script error.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=(
            "This script confirms that the naming of all symbols and identifiers "
            "in Mbed TLS are consistent with the house style and are also "
            "self-consistent.\n\n"
            "Expected to be run from the Mbed TLS root directory.")
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="show parse results"
    )
    parser.add_argument(
        "-q", "--quiet",
        action="store_true",
        help="hide unnecessary text, explanations, and highlights"
    )

    args = parser.parse_args()

    # One shared root logger, handed to both phases below.
    log = logging.getLogger()
    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    log.addHandler(logging.StreamHandler())

    try:
        parse_result = CodeParser(log).comprehensive_parse()
    except Exception: # pylint: disable=broad-except
        traceback.print_exc()
        sys.exit(2)

    checker = NameChecker(parse_result, log)
    sys.exit(checker.perform_checks(quiet=args.quiet))

if __name__ == "__main__":
    main()
diff --git a/tests/scripts/check_test_cases.py b/tests/scripts/check_test_cases.py
new file mode 100755
index 0000000..d67e678
--- /dev/null
+++ b/tests/scripts/check_test_cases.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+
+"""Sanity checks for test data.
+
+This program contains a class for traversing test cases that can be used
+independently of the checks.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import glob
+import os
+import re
+import subprocess
+import sys
+
+class ScriptOutputError(ValueError):
+ """A kind of ValueError raised when a script does
+ not list its test cases in the expected
+ pattern.
+ """
+
+ @property
+ def script_name(self):
+ return super().args[0]
+
+ @property
+ def idx(self):
+ return super().args[1]
+
+ @property
+ def line(self):
+ return super().args[2]
+
+class Results:
+ """Store file and line information about errors or warnings in test suites."""
+
+ def __init__(self, options):
+ self.errors = 0
+ self.warnings = 0
+ self.ignore_warnings = options.quiet
+
+ def error(self, file_name, line_number, fmt, *args):
+ sys.stderr.write(('{}:{}:ERROR:' + fmt + '\n').
+ format(file_name, line_number, *args))
+ self.errors += 1
+
+ def warning(self, file_name, line_number, fmt, *args):
+ if not self.ignore_warnings:
+ sys.stderr.write(('{}:{}:Warning:' + fmt + '\n')
+ .format(file_name, line_number, *args))
+ self.warnings += 1
+
+class TestDescriptionExplorer:
+ """An iterator over test cases with descriptions.
+
+The test cases that have descriptions are:
+* Individual unit tests (entries in a .data file) in test suites.
+* Individual test cases in ssl-opt.sh.
+
+This is an abstract class. To use it, derive a class that implements
+the process_test_case method, and call walk_all().
+"""
+
+ def process_test_case(self, per_file_state,
+ file_name, line_number, description):
+ """Process a test case.
+
+per_file_state: an object created by new_per_file_state() at the beginning
+ of each file.
+file_name: a relative path to the file containing the test case.
+line_number: the line number in the given file.
+description: the test case description as a byte string.
+"""
+ raise NotImplementedError
+
+ def new_per_file_state(self):
+ """Return a new per-file state object.
+
+The default per-file state object is None. Child classes that require per-file
+state may override this method.
+"""
+ #pylint: disable=no-self-use
+ return None
+
+ def walk_test_suite(self, data_file_name):
+ """Iterate over the test cases in the given unit test data file."""
+ in_paragraph = False
+ descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+ with open(data_file_name, 'rb') as data_file:
+ for line_number, line in enumerate(data_file, 1):
+ line = line.rstrip(b'\r\n')
+ if not line:
+ in_paragraph = False
+ continue
+ if line.startswith(b'#'):
+ continue
+ if not in_paragraph:
+ # This is a test case description line.
+ self.process_test_case(descriptions,
+ data_file_name, line_number, line)
+ in_paragraph = True
+
+ def collect_from_script(self, script_name):
+ """Collect the test cases in a script by calling its option
+that lists the test cases."""
+ descriptions = self.new_per_file_state() # pylint: disable=assignment-from-none
+ listed = subprocess.check_output(['sh', script_name, '--list-test-cases'])
+ # Assume test file is responsible for printing identical format of
+ # test case description between --list-test-cases and its OUTCOME.CSV
+ #
+ # idx indicates the number of test case since there is no line number
+ # in the script for each test case.
+ for idx, line in enumerate(listed.splitlines()):
+ # We are expecting the script to list the test cases in
+ # `<suite_name>;<description>` pattern.
+ script_outputs = line.split(b';', 1)
+ if len(script_outputs) == 2:
+ suite_name, description = script_outputs
+ else:
+ raise ScriptOutputError(script_name, idx, line.decode("utf-8"))
+
+ self.process_test_case(descriptions,
+ suite_name.decode('utf-8'),
+ idx,
+ description.rstrip())
+
+ @staticmethod
+ def collect_test_directories():
+ """Get the relative path for the TLS and Crypto test directories."""
+ if os.path.isdir('tests'):
+ tests_dir = 'tests'
+ elif os.path.isdir('suites'):
+ tests_dir = '.'
+ elif os.path.isdir('../suites'):
+ tests_dir = '..'
+ directories = [tests_dir]
+ return directories
+
+ def walk_all(self):
+ """Iterate over all named test cases."""
+ test_directories = self.collect_test_directories()
+ for directory in test_directories:
+ for data_file_name in glob.glob(os.path.join(directory, 'suites',
+ '*.data')):
+ self.walk_test_suite(data_file_name)
+
+ for sh_file in ['ssl-opt.sh', 'compat.sh']:
+ sh_file = os.path.join(directory, sh_file)
+ self.collect_from_script(sh_file)
+
+class TestDescriptions(TestDescriptionExplorer):
+ """Collect the available test cases."""
+
+ def __init__(self):
+ super().__init__()
+ self.descriptions = set()
+
+ def process_test_case(self, _per_file_state,
+ file_name, _line_number, description):
+ """Record an available test case."""
+ base_name = re.sub(r'\.[^.]*$', '', re.sub(r'.*/', '', file_name))
+ key = ';'.join([base_name, description.decode('utf-8')])
+ self.descriptions.add(key)
+
+def collect_available_test_cases():
+ """Collect the available test cases."""
+ explorer = TestDescriptions()
+ explorer.walk_all()
+ return sorted(explorer.descriptions)
+
+class DescriptionChecker(TestDescriptionExplorer):
+ """Check all test case descriptions.
+
+* Check that each description is valid (length, allowed character set, etc.).
+* Check that there is no duplicated description inside of one test suite.
+"""
+
+ def __init__(self, results):
+ self.results = results
+
+ def new_per_file_state(self):
+ """Dictionary mapping descriptions to their line number."""
+ return {}
+
+ def process_test_case(self, per_file_state,
+ file_name, line_number, description):
+ """Check test case descriptions for errors."""
+ results = self.results
+ seen = per_file_state
+ if description in seen:
+ results.error(file_name, line_number,
+ 'Duplicate description (also line {})',
+ seen[description])
+ return
+ if re.search(br'[\t;]', description):
+ results.error(file_name, line_number,
+ 'Forbidden character \'{}\' in description',
+ re.search(br'[\t;]', description).group(0).decode('ascii'))
+ if re.search(br'[^ -~]', description):
+ results.error(file_name, line_number,
+ 'Non-ASCII character in description')
+ if len(description) > 66:
+ results.warning(file_name, line_number,
+ 'Test description too long ({} > 66)',
+ len(description))
+ seen[description] = line_number
+
+def main():
+ parser = argparse.ArgumentParser(description=__doc__)
+ parser.add_argument('--list-all',
+ action='store_true',
+ help='List all test cases, without doing checks')
+ parser.add_argument('--quiet', '-q',
+ action='store_true',
+ help='Hide warnings')
+ parser.add_argument('--verbose', '-v',
+ action='store_false', dest='quiet',
+ help='Show warnings (default: on; undoes --quiet)')
+ options = parser.parse_args()
+ if options.list_all:
+ descriptions = collect_available_test_cases()
+ sys.stdout.write('\n'.join(descriptions + ['']))
+ return
+ results = Results(options)
+ checker = DescriptionChecker(results)
+ try:
+ checker.walk_all()
+ except ScriptOutputError as e:
+ results.error(e.script_name, e.idx,
+ '"{}" should be listed as "<suite_name>;<description>"',
+ e.line)
+ if (results.warnings or results.errors) and not options.quiet:
+ sys.stderr.write('{}: {} errors, {} warnings\n'
+ .format(sys.argv[0], results.errors, results.warnings))
+ sys.exit(1 if results.errors else 0)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/depends.py b/tests/scripts/depends.py
new file mode 100755
index 0000000..1990cd2
--- /dev/null
+++ b/tests/scripts/depends.py
@@ -0,0 +1,557 @@
+#!/usr/bin/env python3
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Test Mbed TLS with a subset of algorithms.
+
+This script can be divided into several steps:
+
+First, include/mbedtls/mbedtls_config.h or a different config file passed
+in the arguments is parsed to extract any configuration options (using config.py).
+
+Then, test domains (groups of jobs, tests) are built based on predefined data
+collected in the DomainData class. Here, each domain has five major traits:
+- domain name, can be used to run only specific tests via command-line;
+- configuration building method, described in detail below;
+- list of symbols passed to the configuration building method;
+- commands to be run on each job (only build, build and test, or any other custom);
+- optional list of symbols to be excluded from testing.
+
+The configuration building method can be one of the three following:
+
+- ComplementaryDomain - build a job for each passed symbol by disabling a single
+ symbol and its reverse dependencies (defined in REVERSE_DEPENDENCIES);
+
+- ExclusiveDomain - build a job where, for each passed symbol, only this particular
+ one is defined and other symbols from the list are unset. For each job look for
+ any non-standard symbols to set/unset in EXCLUSIVE_GROUPS. These are usually not
+ direct dependencies, but rather non-trivial results of other configs missing. Then
+ look for any unset symbols and handle their reverse dependencies.
+ Examples of EXCLUSIVE_GROUPS usage:
+ - MBEDTLS_SHA512_C job turns off all hashes except SHA512. MBEDTLS_SSL_COOKIE_C
+ requires either SHA256 or SHA384 to work, so it also has to be disabled.
+ This is not a dependency on SHA512_C, but a result of an exclusive domain
+ config building method. Relevant field:
+ 'MBEDTLS_SHA512_C': ['-MBEDTLS_SSL_COOKIE_C'],
+
+- DualDomain - combination of the two above - both complementary and exclusive domain
+ job generation code will be run. Currently only used for hashes.
+
+Lastly, the collected jobs are executed and (optionally) tested, with
+error reporting and coloring as configured in options. Each test starts with
+a full config with a few slow or unnecessary options removed
+(see set_reference_config), then the specific job config is derived.
+"""
+import argparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+import traceback
+from typing import Union
+
+# Add the Mbed TLS Python library directory to the module search path
+import scripts_path # pylint: disable=unused-import
+import config
+
+class Colors: # pylint: disable=too-few-public-methods
+ """Minimalistic support for colored output.
+Each field of an object of this class is either None if colored output
+is not possible or not desired, or a pair of strings (start, stop) such
+that outputting start switches the text color to the desired color and
+stop switches the text color back to the default."""
+ red = None
+ green = None
+ cyan = None
+ bold_red = None
+ bold_green = None
+ def __init__(self, options=None):
+ """Initialize color profile according to passed options."""
+ if not options or options.color in ['no', 'never']:
+ want_color = False
+ elif options.color in ['yes', 'always']:
+ want_color = True
+ else:
+ want_color = sys.stderr.isatty()
+ if want_color:
+ # Assume ANSI compatible terminal
+ normal = '\033[0m'
+ self.red = ('\033[31m', normal)
+ self.green = ('\033[32m', normal)
+ self.cyan = ('\033[36m', normal)
+ self.bold_red = ('\033[1;31m', normal)
+ self.bold_green = ('\033[1;32m', normal)
+NO_COLORS = Colors(None)
+
+def log_line(text, prefix='depends.py:', suffix='', color=None):
+ """Print a status message."""
+ if color is not None:
+ prefix = color[0] + prefix
+ suffix = suffix + color[1]
+ sys.stderr.write(prefix + ' ' + text + suffix + '\n')
+ sys.stderr.flush()
+
+def log_command(cmd):
+ """Print a trace of the specified command.
+cmd is a list of strings: a command name and its arguments."""
+ log_line(' '.join(cmd), prefix='+')
+
+def backup_config(options):
+ """Back up the library configuration file (mbedtls_config.h).
+If the backup file already exists, it is presumed to be the desired backup,
+so don't make another backup."""
+ if os.path.exists(options.config_backup):
+ options.own_backup = False
+ else:
+ options.own_backup = True
+ shutil.copy(options.config, options.config_backup)
+
+def restore_config(options):
+ """Restore the library configuration file (mbedtls_config.h).
+Remove the backup file if it was saved earlier."""
+ if options.own_backup:
+ shutil.move(options.config_backup, options.config)
+ else:
+ shutil.copy(options.config_backup, options.config)
+
+def option_exists(conf, option):
+ return option in conf.settings
+
+def set_config_option_value(conf, option, colors, value: Union[bool, str]):
+ """Set/unset a configuration option, optionally specifying a value.
+value can be either True/False (set/unset config option), or a string,
+which will make a symbol defined with a certain value."""
+ if not option_exists(conf, option):
+ log_line('Symbol {} was not found in {}'.format(option, conf.filename), color=colors.red)
+ return False
+
+ if value is False:
+ log_command(['config.py', 'unset', option])
+ conf.unset(option)
+ elif value is True:
+ log_command(['config.py', 'set', option])
+ conf.set(option)
+ else:
+ log_command(['config.py', 'set', option, value])
+ conf.set(option, value)
+ return True
+
+def set_reference_config(conf, options, colors):
+ """Change the library configuration file (mbedtls_config.h) to the reference state.
+The reference state is the one from which the tested configurations are
+derived."""
+ # Turn off options that are not relevant to the tests and slow them down.
+ log_command(['config.py', 'full'])
+ conf.adapt(config.full_adapter)
+ set_config_option_value(conf, 'MBEDTLS_TEST_HOOKS', colors, False)
+ set_config_option_value(conf, 'MBEDTLS_PSA_CRYPTO_CONFIG', colors, False)
+ if options.unset_use_psa:
+ set_config_option_value(conf, 'MBEDTLS_USE_PSA_CRYPTO', colors, False)
+
+class Job:
+ """A job builds the library in a specific configuration and runs some tests."""
+ def __init__(self, name, config_settings, commands):
+ """Build a job object.
+The job uses the configuration described by config_settings. This is a
+dictionary where the keys are preprocessor symbols and the values are
+booleans or strings. A boolean indicates whether or not to #define the
+symbol. With a string, the symbol is #define'd to that value.
+After setting the configuration, the job runs the programs specified by
+commands. This is a list of lists of strings; each list of string is a
+command name and its arguments and is passed to subprocess.call with
+shell=False."""
+ self.name = name
+ self.config_settings = config_settings
+ self.commands = commands
+
+ def announce(self, colors, what):
+ '''Announce the start or completion of a job.
+If what is None, announce the start of the job.
+If what is True, announce that the job has passed.
+If what is False, announce that the job has failed.'''
+ if what is True:
+ log_line(self.name + ' PASSED', color=colors.green)
+ elif what is False:
+ log_line(self.name + ' FAILED', color=colors.red)
+ else:
+ log_line('starting ' + self.name, color=colors.cyan)
+
+ def configure(self, conf, options, colors):
+ '''Set library configuration options as required for the job.'''
+ set_reference_config(conf, options, colors)
+ for key, value in sorted(self.config_settings.items()):
+ ret = set_config_option_value(conf, key, colors, value)
+ if ret is False:
+ return False
+ return True
+
+ def test(self, options):
+ '''Run the job's build and test commands.
+Return True if all the commands succeed and False otherwise.
+If options.keep_going is false, stop as soon as one command fails. Otherwise
+run all the commands, except that if the first command fails, none of the
+other commands are run (typically, the first command is a build command
+and subsequent commands are tests that cannot run if the build failed).'''
+ built = False
+ success = True
+ for command in self.commands:
+ log_command(command)
+ env = os.environ.copy()
+ if 'MBEDTLS_TEST_CONFIGURATION' in env:
+ env['MBEDTLS_TEST_CONFIGURATION'] += '-' + self.name
+ ret = subprocess.call(command, env=env)
+ if ret != 0:
+ if command[0] not in ['make', options.make_command]:
+ log_line('*** [{}] Error {}'.format(' '.join(command), ret))
+ if not options.keep_going or not built:
+ return False
+ success = False
+ built = True
+ return success
+
+# If the configuration option A requires B, make sure that
+# B is in REVERSE_DEPENDENCIES[A].
+# All the information here should be contained in check_config.h. This
+# file includes a copy because it changes rarely and it would be a pain
+# to extract automatically.
+REVERSE_DEPENDENCIES = {
+ 'MBEDTLS_AES_C': ['MBEDTLS_CTR_DRBG_C',
+ 'MBEDTLS_NIST_KW_C'],
+ 'MBEDTLS_CHACHA20_C': ['MBEDTLS_CHACHAPOLY_C'],
+ 'MBEDTLS_ECDSA_C': ['MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED'],
+ 'MBEDTLS_ECP_C': ['MBEDTLS_ECDSA_C',
+ 'MBEDTLS_ECDH_C',
+ 'MBEDTLS_ECJPAKE_C',
+ 'MBEDTLS_ECP_RESTARTABLE',
+ 'MBEDTLS_PK_PARSE_EC_EXTENDED',
+ 'MBEDTLS_PK_PARSE_EC_COMPRESSED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
+ 'MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED',
+ 'MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL_ENABLED'],
+ 'MBEDTLS_ECP_DP_SECP256R1_ENABLED': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
+ 'MBEDTLS_PKCS1_V21': ['MBEDTLS_X509_RSASSA_PSS_SUPPORT'],
+ 'MBEDTLS_PKCS1_V15': ['MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_RSA_ENABLED'],
+ 'MBEDTLS_RSA_C': ['MBEDTLS_X509_RSASSA_PSS_SUPPORT',
+ 'MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_RSA_ENABLED',
+ 'MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED'],
+ 'MBEDTLS_SHA256_C': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
+ 'MBEDTLS_ENTROPY_FORCE_SHA256',
+ 'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT',
+ 'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY',
+ 'MBEDTLS_LMS_C',
+ 'MBEDTLS_LMS_PRIVATE'],
+ 'MBEDTLS_SHA512_C': ['MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT',
+ 'MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY'],
+ 'MBEDTLS_SHA224_C': ['MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED',
+ 'MBEDTLS_ENTROPY_FORCE_SHA256',
+ 'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_IF_PRESENT',
+ 'MBEDTLS_SHA256_USE_ARMV8_A_CRYPTO_ONLY'],
+ 'MBEDTLS_X509_RSASSA_PSS_SUPPORT': []
+}
+
+# If an option is tested in an exclusive test, alter the following defines.
+# These are not necessarily dependencies, but just minimal required changes
+# if a given define is the only one enabled from an exclusive group.
+EXCLUSIVE_GROUPS = {
+ 'MBEDTLS_SHA512_C': ['-MBEDTLS_SSL_COOKIE_C',
+ '-MBEDTLS_SSL_TLS_C'],
+ 'MBEDTLS_ECP_DP_CURVE448_ENABLED': ['-MBEDTLS_ECDSA_C',
+ '-MBEDTLS_ECDSA_DETERMINISTIC',
+ '-MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
+ '-MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
+ '-MBEDTLS_ECJPAKE_C',
+ '-MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
+ 'MBEDTLS_ECP_DP_CURVE25519_ENABLED': ['-MBEDTLS_ECDSA_C',
+ '-MBEDTLS_ECDSA_DETERMINISTIC',
+ '-MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED',
+ '-MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED',
+ '-MBEDTLS_ECJPAKE_C',
+ '-MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED'],
+ 'MBEDTLS_ARIA_C': ['-MBEDTLS_CMAC_C'],
+ 'MBEDTLS_CAMELLIA_C': ['-MBEDTLS_CMAC_C'],
+ 'MBEDTLS_CHACHA20_C': ['-MBEDTLS_CMAC_C', '-MBEDTLS_CCM_C', '-MBEDTLS_GCM_C'],
+ 'MBEDTLS_DES_C': ['-MBEDTLS_CCM_C',
+ '-MBEDTLS_GCM_C',
+ '-MBEDTLS_SSL_TICKET_C',
+ '-MBEDTLS_SSL_CONTEXT_SERIALIZATION'],
+}
+def handle_exclusive_groups(config_settings, symbol):
+ """For every symbol tested in an exclusive group check if there are other
+defines to be altered. """
+ for dep in EXCLUSIVE_GROUPS.get(symbol, []):
+ unset = dep.startswith('-')
+ dep = dep[1:]
+ config_settings[dep] = not unset
+
+def turn_off_dependencies(config_settings):
+ """For every option turned off in config_settings, also turn off what depends on it.
+An option O is turned off if config_settings[O] is False."""
+ for key, value in sorted(config_settings.items()):
+ if value is not False:
+ continue
+ for dep in REVERSE_DEPENDENCIES.get(key, []):
+ config_settings[dep] = False
+
+class BaseDomain: # pylint: disable=too-few-public-methods, unused-argument
+ """A base class for all domains."""
+ def __init__(self, symbols, commands, exclude):
+ """Initialize the jobs container"""
+ self.jobs = []
+
+class ExclusiveDomain(BaseDomain): # pylint: disable=too-few-public-methods
+ """A domain consisting of a set of conceptually-equivalent settings.
+Establish a list of configuration symbols. For each symbol, run a test job
+with this symbol set and the others unset."""
+ def __init__(self, symbols, commands, exclude=None):
+ """Build a domain for the specified list of configuration symbols.
+The domain contains a set of jobs that enable one of the elements
+of symbols and disable the others.
+Each job runs the specified commands.
+If exclude is a regular expression, skip generated jobs whose description
+would match this regular expression."""
+ super().__init__(symbols, commands, exclude)
+ base_config_settings = {}
+ for symbol in symbols:
+ base_config_settings[symbol] = False
+ for symbol in symbols:
+ description = symbol
+ if exclude and re.match(exclude, description):
+ continue
+ config_settings = base_config_settings.copy()
+ config_settings[symbol] = True
+ handle_exclusive_groups(config_settings, symbol)
+ turn_off_dependencies(config_settings)
+ job = Job(description, config_settings, commands)
+ self.jobs.append(job)
+
+class ComplementaryDomain(BaseDomain): # pylint: disable=too-few-public-methods
+ """A domain consisting of a set of loosely-related settings.
+Establish a list of configuration symbols. For each symbol, run a test job
+with this symbol unset.
+If exclude is a regular expression, skip generated jobs whose description
+would match this regular expression."""
+ def __init__(self, symbols, commands, exclude=None):
+ """Build a domain for the specified list of configuration symbols.
+Each job in the domain disables one of the specified symbols.
+Each job runs the specified commands."""
+ super().__init__(symbols, commands, exclude)
+ for symbol in symbols:
+ description = '!' + symbol
+ if exclude and re.match(exclude, description):
+ continue
+ config_settings = {symbol: False}
+ turn_off_dependencies(config_settings)
+ job = Job(description, config_settings, commands)
+ self.jobs.append(job)
+
+class DualDomain(ExclusiveDomain, ComplementaryDomain): # pylint: disable=too-few-public-methods
+ """A domain that contains both the ExclusiveDomain and ComplementaryDomain tests.
+Both parent class __init__ calls are performed in any order and
+each call adds respective jobs. The job array initialization is done once in
+BaseDomain, before the parent __init__ calls."""
+
+class CipherInfo: # pylint: disable=too-few-public-methods
+ """Collect data about cipher.h."""
+ def __init__(self):
+ self.base_symbols = set()
+ with open('include/mbedtls/cipher.h', encoding="utf-8") as fh:
+ for line in fh:
+ m = re.match(r' *MBEDTLS_CIPHER_ID_(\w+),', line)
+ if m and m.group(1) not in ['NONE', 'NULL', '3DES']:
+ self.base_symbols.add('MBEDTLS_' + m.group(1) + '_C')
+
+class DomainData:
+ """A container for domains and jobs, used to structure the tests."""
+ def config_symbols_matching(self, regexp):
+ """List the mbedtls_config.h settings matching regexp."""
+ return [symbol for symbol in self.all_config_symbols
+ if re.match(regexp, symbol)]
+
+ def __init__(self, options, conf):
+ """Gather data about the library and establish a list of domains to test."""
+ build_command = [options.make_command, 'CFLAGS=-Werror -O2']
+ build_and_test = [build_command, [options.make_command, 'test']]
+ self.all_config_symbols = set(conf.settings.keys())
+ # Find hash modules by name.
+ hash_symbols = self.config_symbols_matching(r'MBEDTLS_(MD|RIPEMD|SHA)[0-9]+_C\Z')
+ # Find elliptic curve enabling macros by name.
+ curve_symbols = self.config_symbols_matching(r'MBEDTLS_ECP_DP_\w+_ENABLED\Z')
+ # Find key exchange enabling macros by name.
+ key_exchange_symbols = self.config_symbols_matching(r'MBEDTLS_KEY_EXCHANGE_\w+_ENABLED\Z')
+ # Find cipher IDs (block permutations and stream ciphers --- chaining
+ # and padding modes are exercised separately) information by parsing
+ # cipher.h, as the information is not readily available in mbedtls_config.h.
+ cipher_info = CipherInfo()
+ # Find block cipher chaining and padding mode enabling macros by name.
+ cipher_chaining_symbols = self.config_symbols_matching(r'MBEDTLS_CIPHER_MODE_\w+\Z')
+ cipher_padding_symbols = self.config_symbols_matching(r'MBEDTLS_CIPHER_PADDING_\w+\Z')
+ self.domains = {
+ # Cipher IDs, chaining modes and padding modes. Run the test suites.
+ 'cipher_id': ExclusiveDomain(cipher_info.base_symbols,
+ build_and_test),
+ 'cipher_chaining': ExclusiveDomain(cipher_chaining_symbols,
+ build_and_test),
+ 'cipher_padding': ExclusiveDomain(cipher_padding_symbols,
+ build_and_test),
+ # Elliptic curves. Run the test suites.
+ 'curves': ExclusiveDomain(curve_symbols, build_and_test),
+ # Hash algorithms. Excluding exclusive domains of MD, RIPEMD, SHA1,
+ # SHA224 and SHA384 because MBEDTLS_ENTROPY_C is extensively used
+ # across various modules, but it depends on either SHA256 or SHA512.
+ # As a consequence an "exclusive" test of anything other than SHA256
+ # or SHA512 with MBEDTLS_ENTROPY_C enabled is not possible.
+ 'hashes': DualDomain(hash_symbols, build_and_test,
+ exclude=r'MBEDTLS_(MD|RIPEMD|SHA1_)' \
+ '|MBEDTLS_SHA224_' \
+ '|MBEDTLS_SHA384_' \
+ '|MBEDTLS_SHA3_'),
+ # Key exchange types.
+ 'kex': ExclusiveDomain(key_exchange_symbols, build_and_test),
+ 'pkalgs': ComplementaryDomain(['MBEDTLS_ECDSA_C',
+ 'MBEDTLS_ECP_C',
+ 'MBEDTLS_PKCS1_V21',
+ 'MBEDTLS_PKCS1_V15',
+ 'MBEDTLS_RSA_C',
+ 'MBEDTLS_X509_RSASSA_PSS_SUPPORT'],
+ build_and_test),
+ }
+ self.jobs = {}
+ for domain in self.domains.values():
+ for job in domain.jobs:
+ self.jobs[job.name] = job
+
+ def get_jobs(self, name):
+ """Return the list of jobs identified by the given name.
+A name can either be the name of a domain or the name of one specific job."""
+ if name in self.domains:
+ return sorted(self.domains[name].jobs, key=lambda job: job.name)
+ else:
+ return [self.jobs[name]]
+
+def run(options, job, conf, colors=NO_COLORS):
+ """Run the specified job (a Job instance)."""
+ subprocess.check_call([options.make_command, 'clean'])
+ job.announce(colors, None)
+ if not job.configure(conf, options, colors):
+ job.announce(colors, False)
+ return False
+ conf.write()
+ success = job.test(options)
+ job.announce(colors, success)
+ return success
+
+def run_tests(options, domain_data, conf):
+ """Run the desired jobs.
+domain_data should be a DomainData instance that describes the available
+domains and jobs.
+Run the jobs listed in options.tasks."""
+ if not hasattr(options, 'config_backup'):
+ options.config_backup = options.config + '.bak'
+ colors = Colors(options)
+ jobs = []
+ failures = []
+ successes = []
+ for name in options.tasks:
+ jobs += domain_data.get_jobs(name)
+ backup_config(options)
+ try:
+ for job in jobs:
+ success = run(options, job, conf, colors=colors)
+ if not success:
+ if options.keep_going:
+ failures.append(job.name)
+ else:
+ return False
+ else:
+ successes.append(job.name)
+ restore_config(options)
+ except:
+ # Restore the configuration, except in stop-on-error mode if there
+ # was an error, where we leave the failing configuration up for
+ # developer convenience.
+ if options.keep_going:
+ restore_config(options)
+ raise
+ if successes:
+ log_line('{} passed'.format(' '.join(successes)), color=colors.bold_green)
+ if failures:
+ log_line('{} FAILED'.format(' '.join(failures)), color=colors.bold_red)
+ return False
+ else:
+ return True
+
+def main():
+ try:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=
+ "Test Mbed TLS with a subset of algorithms.\n\n"
+ "Example usage:\n"
+ r"./tests/scripts/depends.py \!MBEDTLS_SHA1_C MBEDTLS_SHA256_C""\n"
+ "./tests/scripts/depends.py MBEDTLS_AES_C hashes\n"
+ "./tests/scripts/depends.py cipher_id cipher_chaining\n")
+ parser.add_argument('--color', metavar='WHEN',
+ help='Colorize the output (always/auto/never)',
+ choices=['always', 'auto', 'never'], default='auto')
+ parser.add_argument('-c', '--config', metavar='FILE',
+ help='Configuration file to modify',
+ default='include/mbedtls/mbedtls_config.h')
+ parser.add_argument('-C', '--directory', metavar='DIR',
+ help='Change to this directory before anything else',
+ default='.')
+ parser.add_argument('-k', '--keep-going',
+ help='Try all configurations even if some fail (default)',
+ action='store_true', dest='keep_going', default=True)
+ parser.add_argument('-e', '--no-keep-going',
+ help='Stop as soon as a configuration fails',
+ action='store_false', dest='keep_going')
+ parser.add_argument('--list-jobs',
+ help='List supported jobs and exit',
+ action='append_const', dest='list', const='jobs')
+ parser.add_argument('--list-domains',
+ help='List supported domains and exit',
+ action='append_const', dest='list', const='domains')
+ parser.add_argument('--make-command', metavar='CMD',
+ help='Command to run instead of make (e.g. gmake)',
+ action='store', default='make')
+ parser.add_argument('--unset-use-psa',
+ help='Unset MBEDTLS_USE_PSA_CRYPTO before any test',
+ action='store_true', dest='unset_use_psa')
+ parser.add_argument('tasks', metavar='TASKS', nargs='*',
+ help='The domain(s) or job(s) to test (default: all).',
+ default=True)
+ options = parser.parse_args()
+ os.chdir(options.directory)
+ conf = config.ConfigFile(options.config)
+ domain_data = DomainData(options, conf)
+
+ if options.tasks is True:
+ options.tasks = sorted(domain_data.domains.keys())
+ if options.list:
+ for arg in options.list:
+ for domain_name in sorted(getattr(domain_data, arg).keys()):
+ print(domain_name)
+ sys.exit(0)
+ else:
+ sys.exit(0 if run_tests(options, domain_data, conf) else 1)
+ except Exception: # pylint: disable=broad-except
+ traceback.print_exc()
+ sys.exit(3)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/docker_env.sh b/tests/scripts/docker_env.sh
new file mode 100755
index 0000000..cfc98df
--- /dev/null
+++ b/tests/scripts/docker_env.sh
@@ -0,0 +1,90 @@
+#!/bin/bash -eu
+
+# docker_env.sh
+#
+# Purpose
+# -------
+#
+# This is a helper script to enable running tests under a Docker container,
+# thus making it easier to get set up as well as isolating test dependencies
+# (which include legacy/insecure configurations of openssl and gnutls).
+#
+# WARNING: the Dockerfile used by this script is no longer maintained! See
+# https://github.com/Mbed-TLS/mbedtls-test/blob/master/README.md#quick-start
+# for the set of Docker images we use on the CI.
+#
+# Notes for users
+# ---------------
+# This script expects a Linux x86_64 system with a recent version of Docker
+# installed and available for use, as well as http/https access. If a proxy
+# server must be used, invoke this script with the usual environment variables
+# (http_proxy and https_proxy) set appropriately. If an alternate Docker
+# registry is needed, specify MBEDTLS_DOCKER_REGISTRY to point at the
+# host name.
+#
+#
+# Running this script directly will check for Docker availability and set up
+# the Docker image.
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+
+# default values, can be overridden by the environment
+: ${MBEDTLS_DOCKER_GUEST:=bionic}
+
+
+DOCKER_IMAGE_TAG="armmbed/mbedtls-test:${MBEDTLS_DOCKER_GUEST}"
+
+# Make sure docker is available
+if ! which docker > /dev/null; then
+    echo "Docker is required but doesn't seem to be installed. See https://www.docker.com/ to get started"
+    exit 1
+fi
+
+# Figure out if we need to 'sudo docker'
+# NOTE(review): 'groups | grep docker' also matches group names that merely
+# contain "docker" (e.g. "dockerroot"); 'id -nG | grep -qw docker' would be
+# a stricter membership test.
+if groups | grep docker > /dev/null; then
+    DOCKER="docker"
+else
+    echo "Using sudo to invoke docker since you're not a member of the docker group..."
+    DOCKER="sudo docker"
+fi
+
+# Figure out the number of processors available
+if [ "$(uname)" == "Darwin" ]; then
+    NUM_PROC="$(sysctl -n hw.logicalcpu)"
+else
+    NUM_PROC="$(nproc)"
+fi
+
+# Build the Docker image
+# The ${var+...} expansions below pass the proxy/registry build arguments
+# only when the corresponding environment variable is actually set.
+echo "Getting docker image up to date (this may take a few minutes)..."
+${DOCKER} image build \
+    -t ${DOCKER_IMAGE_TAG} \
+    --cache-from=${DOCKER_IMAGE_TAG} \
+    --build-arg MAKEFLAGS_PARALLEL="-j ${NUM_PROC}" \
+    --network host \
+    ${http_proxy+--build-arg http_proxy=${http_proxy}} \
+    ${https_proxy+--build-arg https_proxy=${https_proxy}} \
+    ${MBEDTLS_DOCKER_REGISTRY+--build-arg MY_REGISTRY="${MBEDTLS_DOCKER_REGISTRY}/"} \
+    tests/docker/${MBEDTLS_DOCKER_GUEST}
+
+# Run a command inside the freshly built Docker container.
+#
+# Usage: run_in_docker [-e VAR=value]... COMMAND [ARGS...]
+# Leading "-e" option pairs are collected and forwarded to 'docker run' as
+# additional environment settings; everything after them is executed in the
+# container, which mounts the current working tree at the same absolute path.
+run_in_docker()
+{
+    # NOTE(review): ENV_ARGS deliberately relies on word splitting (it is
+    # expanded unquoted below), so -e values containing spaces are not
+    # supported.
+    ENV_ARGS=""
+    while [ "$1" == "-e" ]; do
+        ENV_ARGS="${ENV_ARGS} $1 $2"
+        shift 2
+    done
+
+    # NOTE(review): the trailing $@ and the $PWD mounts are unquoted, so a
+    # working directory or arguments containing whitespace would be split;
+    # "$@" and "$PWD" would be the safe spellings.
+    ${DOCKER} container run -it --rm \
+        --cap-add SYS_PTRACE \
+        --user "$(id -u):$(id -g)" \
+        --volume $PWD:$PWD \
+        --workdir $PWD \
+        -e MAKEFLAGS \
+        -e PYLINTHOME=/tmp/.pylintd \
+        ${ENV_ARGS} \
+        ${DOCKER_IMAGE_TAG} \
+        $@
+}
diff --git a/tests/scripts/doxygen.sh b/tests/scripts/doxygen.sh
new file mode 100755
index 0000000..b6a1d45
--- /dev/null
+++ b/tests/scripts/doxygen.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+# Make sure the doxygen documentation builds without warnings
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# Abort on errors (and uninitialised variables)
+set -eu
+
+# Sanity check: we must be at the root of an Mbed TLS source tree.
+if [ -d library -a -d include -a -d tests ]; then :; else
+    echo "Must be run from Mbed TLS root" >&2
+    exit 1
+fi
+
+# Build the full API documentation, capturing stdout and stderr separately;
+# a failure of the generator itself is fatal.
+if scripts/apidoc_full.sh > doc.out 2>doc.err; then :; else
+    cat doc.err
+    echo "FAIL" >&2
+    exit 1;
+fi
+
+# Doxygen reports unsupported tags on stderr; those are expected and are
+# filtered out before scanning for real warnings/errors.
+cat doc.out doc.err | \
+    grep -v "warning: ignoring unsupported tag" \
+    > doc.filtered
+
+# Any remaining warning or error means the documentation is not clean.
+if grep -E "(warning|error):" doc.filtered; then
+    echo "FAIL" >&2
+    exit 1;
+fi
+
+# Success: remove generated documentation and temporary capture files.
+make apidoc_clean
+rm -f doc.out doc.err doc.filtered
diff --git a/tests/scripts/gen_ctr_drbg.pl b/tests/scripts/gen_ctr_drbg.pl
new file mode 100755
index 0000000..ec5e5d8
--- /dev/null
+++ b/tests/scripts/gen_ctr_drbg.pl
@@ -0,0 +1,96 @@
+#!/usr/bin/env perl
+#
+# Based on NIST CTR_DRBG.rsp validation file
+# Only uses AES-256-CTR cases that use a Derivation function
+# and concats nonce and personalization for initialization.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use strict;
+
+my $file = shift;
+
+open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
+
+# Read the next line from TEST_DATA and extract the value of a
+# "[Name = value]" suite-level header field. Returns "" (or undef from the
+# failed match) if the next line does not carry this field.
+sub get_suite_val($)
+{
+    my $name = shift;
+    my $val = "";
+
+    my $line = <TEST_DATA>;
+    ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
+
+    return $val;
+}
+
+# Skip ahead to the next "name = value" line in TEST_DATA and return the
+# value for the given name (undef if the line found belongs to another name).
+sub get_val($)
+{
+    my $name = shift;
+    my $val = "";
+    my $line;
+
+    # Advance to the first line containing '='.
+    while($line = <TEST_DATA>)
+    {
+        next if($line !~ /=/);
+        last;
+    }
+
+    ($val) = ($line =~ /^$name = (\w+)/);
+
+    return $val;
+}
+
+my $cnt = 1;;  # NOTE(review): stray second semicolon (harmless)
+while (my $line = <TEST_DATA>)
+{
+    # Only the "[AES-256 use df" suites of the .rsp file are converted.
+    next if ($line !~ /^\[AES-256 use df/);
+
+    my $PredictionResistanceStr = get_suite_val("PredictionResistance");
+    my $PredictionResistance = 0;
+    $PredictionResistance = 1 if ($PredictionResistanceStr eq 'True');
+    my $EntropyInputLen = get_suite_val("EntropyInputLen");
+    my $NonceLen = get_suite_val("NonceLen");
+    my $PersonalizationStringLen = get_suite_val("PersonalizationStringLen");
+    my $AdditionalInputLen = get_suite_val("AdditionalInputLen");
+
+    # Each suite carries 15 COUNTed test vectors.
+    for ($cnt = 0; $cnt < 15; $cnt++)
+    {
+        my $Count = get_val("COUNT");
+        my $EntropyInput = get_val("EntropyInput");
+        my $Nonce = get_val("Nonce");
+        my $PersonalizationString = get_val("PersonalizationString");
+        my $AdditionalInput1 = get_val("AdditionalInput");
+        # NOTE(review): "my $x = EXPR if COND" is explicitly deprecated in
+        # perlsyn (the variable's state when COND is false is undefined). It
+        # happens to work here because each variable is only read under the
+        # same condition it was assigned under.
+        my $EntropyInputPR1 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
+        my $EntropyInputReseed = get_val("EntropyInputReseed") if ($PredictionResistance == 0);
+        my $AdditionalInputReseed = get_val("AdditionalInputReseed") if ($PredictionResistance == 0);
+        my $AdditionalInput2 = get_val("AdditionalInput");
+        my $EntropyInputPR2 = get_val("EntropyInputPR") if ($PredictionResistance == 1);
+        my $ReturnedBits = get_val("ReturnedBits");
+
+        # The nonce and personalization string are concatenated to form the
+        # single init/custom input of the generated test function.
+        if ($PredictionResistance == 1)
+        {
+            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
+            print("ctr_drbg_validate_pr");
+            print(":\"$Nonce$PersonalizationString\"");
+            print(":\"$EntropyInput$EntropyInputPR1$EntropyInputPR2\"");
+            print(":\"$AdditionalInput1\"");
+            print(":\"$AdditionalInput2\"");
+            print(":\"$ReturnedBits\"");
+            print("\n\n");
+        }
+        else
+        {
+            print("CTR_DRBG NIST Validation (AES-256 use df,$PredictionResistanceStr,$EntropyInputLen,$NonceLen,$PersonalizationStringLen,$AdditionalInputLen) #$Count\n");
+            print("ctr_drbg_validate_nopr");
+            print(":\"$Nonce$PersonalizationString\"");
+            print(":\"$EntropyInput$EntropyInputReseed\"");
+            print(":\"$AdditionalInput1\"");
+            print(":\"$AdditionalInputReseed\"");
+            print(":\"$AdditionalInput2\"");
+            print(":\"$ReturnedBits\"");
+            print("\n\n");
+        }
+    }
+}
+close(TEST_DATA);
diff --git a/tests/scripts/gen_gcm_decrypt.pl b/tests/scripts/gen_gcm_decrypt.pl
new file mode 100755
index 0000000..30d45c3
--- /dev/null
+++ b/tests/scripts/gen_gcm_decrypt.pl
@@ -0,0 +1,101 @@
+#!/usr/bin/env perl
+#
+# Based on NIST gcmDecryptxxx.rsp validation files
+# Only first 3 of every set used for compile time saving
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use strict;
+
+my $file = shift;
+
+open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
+
+# Scan TEST_DATA forward for the next bracketed "[Name = value]" header
+# line and return the value extracted for the given name ("" or undef if
+# the bracketed line found does not carry this name).
+sub get_suite_val($)
+{
+    my $name = shift;
+    my $val = "";
+
+    while(my $line = <TEST_DATA>)
+    {
+        next if ($line !~ /^\[/);
+        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
+        last;
+    }
+
+    return $val;
+}
+
+# Skip ahead to the next "name = value" line in TEST_DATA and return the
+# value for the given name (undef if the line found belongs to another name).
+sub get_val($)
+{
+    my $name = shift;
+    my $val = "";
+    my $line;
+
+    # Advance to the first line containing '='.
+    while($line = <TEST_DATA>)
+    {
+        next if($line !~ /=/);
+        last;
+    }
+
+    ($val) = ($line =~ /^$name = (\w+)/);
+
+    return $val;
+}
+
+# Like get_val(), but NIST decrypt vectors may replace the "PT = ..." line
+# with a bare "FAIL" line; in that case the default "FAIL" is returned so
+# the generated test case expects tag verification to fail.
+sub get_val_or_fail($)
+{
+    my $name = shift;
+    my $val = "FAIL";
+    my $line;
+
+    # Advance to the first line containing '=' or the literal FAIL marker.
+    while($line = <TEST_DATA>)
+    {
+        next if($line !~ /=/ && $line !~ /FAIL/);
+        last;
+    }
+
+    ($val) = ($line =~ /^$name = (\w+)/) if ($line =~ /=/);
+
+    return $val;
+}
+
+my $cnt = 1;;  # NOTE(review): stray second semicolon (harmless)
+while (my $line = <TEST_DATA>)
+{
+    # Each suite header gives the parameter sizes; a non-numeric Keylen
+    # means we have scanned past the last section.
+    my $key_len = get_suite_val("Keylen");
+    next if ($key_len !~ /\d+/);
+    my $iv_len = get_suite_val("IVlen");
+    my $pt_len = get_suite_val("PTlen");
+    my $add_len = get_suite_val("AADlen");
+    my $tag_len = get_suite_val("Taglen");
+
+    # Only the first 3 vectors of each set are used, to save compile time.
+    for ($cnt = 0; $cnt < 3; $cnt++)
+    {
+        my $Count = get_val("Count");
+        my $key = get_val("Key");
+        my $iv = get_val("IV");
+        my $ct = get_val("CT");
+        my $add = get_val("AAD");
+        my $tag = get_val("Tag");
+        my $pt = get_val_or_fail("PT");
+
+        # Emit one .data entry per vector ("FAIL" plaintext marks an
+        # expected verification failure).
+        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
+        print("gcm_decrypt_and_verify");
+        print(":\"$key\"");
+        print(":\"$ct\"");
+        print(":\"$iv\"");
+        print(":\"$add\"");
+        print(":$tag_len");
+        print(":\"$tag\"");
+        print(":\"$pt\"");
+        print(":0");
+        print("\n\n");
+    }
+}
+
+print("GCM Selftest\n");
+print("gcm_selftest:\n\n");
+
+close(TEST_DATA);
diff --git a/tests/scripts/gen_gcm_encrypt.pl b/tests/scripts/gen_gcm_encrypt.pl
new file mode 100755
index 0000000..b4f0849
--- /dev/null
+++ b/tests/scripts/gen_gcm_encrypt.pl
@@ -0,0 +1,84 @@
+#!/usr/bin/env perl
+#
+# Based on NIST gcmEncryptIntIVxxx.rsp validation files
+# Only first 3 of every set used for compile time saving
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use strict;
+
+my $file = shift;
+
+open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
+
+# Scan TEST_DATA forward for the next bracketed "[Name = value]" header
+# line and return the value extracted for the given name ("" or undef if
+# the bracketed line found does not carry this name).
+sub get_suite_val($)
+{
+    my $name = shift;
+    my $val = "";
+
+    while(my $line = <TEST_DATA>)
+    {
+        next if ($line !~ /^\[/);
+        ($val) = ($line =~ /\[$name\s\=\s(\w+)\]/);
+        last;
+    }
+
+    return $val;
+}
+
+# Skip ahead to the next "name = value" line in TEST_DATA and return the
+# value for the given name (undef if the line found belongs to another name).
+sub get_val($)
+{
+    my $name = shift;
+    my $val = "";
+    my $line;
+
+    # Advance to the first line containing '='.
+    while($line = <TEST_DATA>)
+    {
+        next if($line !~ /=/);
+        last;
+    }
+
+    ($val) = ($line =~ /^$name = (\w+)/);
+
+    return $val;
+}
+
+my $cnt = 1;;  # NOTE(review): stray second semicolon (harmless)
+while (my $line = <TEST_DATA>)
+{
+    # Each suite header gives the parameter sizes; a non-numeric Keylen
+    # means we have scanned past the last section.
+    my $key_len = get_suite_val("Keylen");
+    next if ($key_len !~ /\d+/);
+    my $iv_len = get_suite_val("IVlen");
+    my $pt_len = get_suite_val("PTlen");
+    my $add_len = get_suite_val("AADlen");
+    my $tag_len = get_suite_val("Taglen");
+
+    # Only the first 3 vectors of each set are used, to save compile time.
+    for ($cnt = 0; $cnt < 3; $cnt++)
+    {
+        my $Count = get_val("Count");
+        my $key = get_val("Key");
+        my $pt = get_val("PT");
+        my $add = get_val("AAD");
+        my $iv = get_val("IV");
+        my $ct = get_val("CT");
+        my $tag = get_val("Tag");
+
+        # Emit one .data entry per vector.
+        print("GCM NIST Validation (AES-$key_len,$iv_len,$pt_len,$add_len,$tag_len) #$Count\n");
+        print("gcm_encrypt_and_tag");
+        print(":\"$key\"");
+        print(":\"$pt\"");
+        print(":\"$iv\"");
+        print(":\"$add\"");
+        print(":\"$ct\"");
+        print(":$tag_len");
+        print(":\"$tag\"");
+        print(":0");
+        print("\n\n");
+    }
+}
+
+print("GCM Selftest\n");
+print("gcm_selftest:\n\n");
+
+close(TEST_DATA);
diff --git a/tests/scripts/gen_pkcs1_v21_sign_verify.pl b/tests/scripts/gen_pkcs1_v21_sign_verify.pl
new file mode 100755
index 0000000..fe2d3f5
--- /dev/null
+++ b/tests/scripts/gen_pkcs1_v21_sign_verify.pl
@@ -0,0 +1,74 @@
+#!/usr/bin/env perl
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use strict;
+
+my $file = shift;
+
+open(TEST_DATA, "$file") or die "Opening test cases '$file': $!";
+
+# Locate the section whose comment header starts with "# $str", then read
+# the following lines (up to a blank CRLF line) as the value, stripping
+# spaces and line endings.
+# NOTE(review): the second argument ($name) is accepted but never used.
+sub get_val($$)
+{
+    my $str = shift;
+    my $name = shift;
+    my $val = "";
+
+    # Advance to the "# <str>..." header line.
+    while(my $line = <TEST_DATA>)
+    {
+        next if($line !~ /^# $str/);
+        last;
+    }
+
+    # Accumulate the hex value, which may span several lines, until the
+    # terminating blank (CRLF) line.
+    while(my $line = <TEST_DATA>)
+    {
+        last if($line eq "\r\n");
+        $val .= $line;
+    }
+
+    $val =~ s/[ \r\n]//g;
+
+    return $val;
+}
+
+# Parser state: RSA key components are read once per "# Example" section;
+# each section then carries 6 message/salt/signature triples.
+my $state = 0;  # NOTE(review): assigned but never used
+my $val_n = "";
+my $val_e = "";
+my $val_p = "";
+my $val_q = "";
+my $mod = 0;
+my $cnt = 1;
+while (my $line = <TEST_DATA>)
+{
+    next if ($line !~ /^# Example/);
+
+    # Modulus size in bits, taken from "... A NNN" in the section heading.
+    ( $mod ) = ($line =~ /A (\d+)/);
+    $val_n = get_val("RSA modulus n", "N");
+    $val_e = get_val("RSA public exponent e", "E");
+    $val_p = get_val("Prime p", "P");
+    $val_q = get_val("Prime q", "Q");
+
+    # Emit a sign and a verify test case for each of the 6 examples.
+    for(my $i = 1; $i <= 6; $i++)
+    {
+        my $val_m = get_val("Message to be", "M");
+        my $val_salt = get_val("Salt", "Salt");
+        my $val_sig = get_val("Signature", "Sig");
+
+        print("RSASSA-PSS Signature Example ${cnt}_${i}\n");
+        print("pkcs1_rsassa_pss_sign:$mod:16:\"$val_p\":16:\"$val_q\":16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
+        print(":\"$val_m\"");
+        print(":\"$val_salt\"");
+        print(":\"$val_sig\":0");
+        print("\n\n");
+
+        print("RSASSA-PSS Signature Example ${cnt}_${i} (verify)\n");
+        print("pkcs1_rsassa_pss_verify:$mod:16:\"$val_n\":16:\"$val_e\":SIG_RSA_SHA1:MBEDTLS_MD_SHA1");
+        print(":\"$val_m\"");
+        print(":\"$val_salt\"");
+        print(":\"$val_sig\":0");
+        print("\n\n");
+    }
+    $cnt++;
+}
+close(TEST_DATA);
diff --git a/tests/scripts/generate-afl-tests.sh b/tests/scripts/generate-afl-tests.sh
new file mode 100755
index 0000000..d4ef0f3
--- /dev/null
+++ b/tests/scripts/generate-afl-tests.sh
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+# This script splits the data test files containing the test cases into
+# individual files (one test case per file) suitable for use with afl
+# (American Fuzzy Lop). http://lcamtuf.coredump.cx/afl/
+#
+# Usage: generate-afl-tests.sh <test data file path>
+# <test data file path> - should be the path to one of the test suite files
+# such as 'test_suite_rsa.data'
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+# Abort on errors
+set -e
+
+if [ -z $1 ]
+then
+ echo " [!] No test file specified" >&2
+ echo "Usage: $0 <test data file>" >&2
+ exit 1
+fi
+
+SRC_FILEPATH=$(dirname $1)/$(basename $1)
+TESTSUITE=$(basename $1 .data)
+
+THIS_DIR=$(basename $PWD)
+
+if [ -d ../library -a -d ../include -a -d ../tests -a $THIS_DIR == "tests" ];
+then :;
+else
+ echo " [!] Must be run from Mbed TLS tests directory" >&2
+ exit 1
+fi
+
+DEST_TESTCASE_DIR=$TESTSUITE-afl-tests
+DEST_OUTPUT_DIR=$TESTSUITE-afl-out
+
+echo " [+] Creating output directories" >&2
+
+if [ -e $DEST_OUTPUT_DIR/* ];
+then :
+ echo " [!] Test output files already exist." >&2
+ exit 1
+else
+ mkdir -p $DEST_OUTPUT_DIR
+fi
+
+if [ -e $DEST_TESTCASE_DIR/* ];
+then :
+ echo " [!] Test output files already exist." >&2
+else
+ mkdir -p $DEST_TESTCASE_DIR
+fi
+
+echo " [+] Creating test cases" >&2
+cd $DEST_TESTCASE_DIR
+
+split -p '^\s*$' ../$SRC_FILEPATH
+
+for f in *;
+do
+ # Strip out any blank lines (no trim on OS X)
+ sed '/^\s*$/d' $f >testcase_$f
+ rm $f
+done
+
+cd ..
+
+echo " [+] Test cases in $DEST_TESTCASE_DIR" >&2
+
diff --git a/tests/scripts/generate_bignum_tests.py b/tests/scripts/generate_bignum_tests.py
new file mode 100755
index 0000000..8dbb6ed
--- /dev/null
+++ b/tests/scripts/generate_bignum_tests.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+"""Generate test data for bignum functions.
+
+With no arguments, generate all test data. With non-option arguments,
+generate only the specified files.
+
+Class structure:
+
+Child classes of test_data_generation.BaseTarget (file targets) represent an output
+file. These indicate where test cases will be written to, for all subclasses of
+this target. Multiple file targets should not reuse a `target_basename`.
+
+Each subclass derived from a file target can either be:
+ - A concrete class, representing a test function, which generates test cases.
+ - An abstract class containing shared methods and attributes, not associated
+ with a test function. An example is BignumOperation, which provides
+ common features used for bignum binary operations.
+
+Both concrete and abstract subclasses can be derived from, to implement
+additional test cases (see BignumCmp and BignumCmpAbs for examples of deriving
+from abstract and concrete classes).
+
+
+Adding test case generation for a function:
+
+A subclass representing the test function should be added, deriving from a
+file target such as BignumTarget. This test class must set/implement the
+following:
+ - test_function: the function name from the associated .function file.
+ - test_name: a descriptive name or brief summary to refer to the test
+ function.
+ - arguments(): a method to generate the list of arguments required for the
+ test_function.
+ - generate_function_tests(): a method to generate TestCases for the function.
+ This should create instances of the class with required input data, and
+ call `.create_test_case()` to yield the TestCase.
+
+Additional details and other attributes/methods are given in the documentation
+of BaseTarget in test_data_generation.py.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import sys
+
+from abc import ABCMeta
+from typing import List
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import test_data_generation
+from mbedtls_dev import bignum_common
+# Import modules containing additional test classes
+# Test function classes in these modules will be registered by
+# the framework
+from mbedtls_dev import bignum_core, bignum_mod_raw, bignum_mod # pylint: disable=unused-import
+
+class BignumTarget(test_data_generation.BaseTarget):
+    #pylint: disable=too-few-public-methods
+    """Target for bignum (legacy) test case generation."""
+    # Basename of the generated .data file for legacy bignum tests.
+    target_basename = 'test_suite_bignum.generated'
+
+
+class BignumOperation(bignum_common.OperationCommon, BignumTarget,
+                      metaclass=ABCMeta):
+    #pylint: disable=abstract-method
+    """Common features for bignum operations in legacy tests."""
+    # Avoid generating both (a, b) and (b, a) for symmetric operations.
+    unique_combinations_only = True
+    # Hex inputs covering: null (empty), zero, negative zero, small values,
+    # values with a leading zero limb and multi-limb values, in both signs.
+    input_values = [
+        "", "0", "-", "-0",
+        "7b", "-7b",
+        "0000000000000000123", "-0000000000000000123",
+        "1230000000000000000", "-1230000000000000000"
+    ]
+
+    def description_suffix(self) -> str:
+        #pylint: disable=no-self-use # derived classes need self
+        """Text to add at the end of the test case description."""
+        return ""
+
+    def description(self) -> str:
+        """Generate a description for the test case.
+
+        If not set, case_description uses the form A `symbol` B, where symbol
+        is used to represent the operation. Descriptions of each value are
+        generated to provide some context to the test case.
+        """
+        if not self.case_description:
+            self.case_description = "{} {} {}".format(
+                self.value_description(self.arg_a),
+                self.symbol,
+                self.value_description(self.arg_b)
+            )
+            description_suffix = self.description_suffix()
+            if description_suffix:
+                self.case_description += " " + description_suffix
+        return super().description()
+
+    @staticmethod
+    def value_description(val) -> str:
+        """Generate a description of the argument val.
+
+        This produces a simple description of the value, which is used in test
+        case naming to add context.
+        """
+        if val == "":
+            return "0 (null)"
+        if val == "-":
+            return "negative 0 (null)"
+        if val == "0":
+            return "0 (1 limb)"
+
+        if val[0] == "-":
+            tmp = "negative"
+            val = val[1:]
+        else:
+            tmp = "positive"
+        if val[0] == "0":
+            tmp += " with leading zero limb"
+        elif len(val) > 10:
+            tmp = "large " + tmp
+        return tmp
+
+
+class BignumCmp(BignumOperation):
+    """Test cases for bignum value comparison."""
+    count = 0
+    test_function = "mpi_cmp_mpi"
+    test_name = "MPI compare"
+    input_cases = [
+        ("-2", "-3"),
+        ("-2", "-2"),
+        ("2b4", "2b5"),
+        ("2b5", "2b6")
+    ]
+
+    def __init__(self, val_a, val_b) -> None:
+        super().__init__(val_a, val_b)
+        # Sign of (a - b): -1, 0 or 1, computed without branching.
+        self._result = int(self.int_a > self.int_b) - int(self.int_a < self.int_b)
+        # Display symbol matching the comparison outcome.
+        self.symbol = ["<", "==", ">"][self._result + 1]
+
+    def result(self) -> List[str]:
+        return [str(self._result)]
+
+
+class BignumCmpAbs(BignumCmp):
+    """Test cases for absolute bignum value comparison."""
+    count = 0
+    test_function = "mpi_cmp_abs"
+    test_name = "MPI compare (abs)"
+
+    def __init__(self, val_a, val_b) -> None:
+        # Compare absolute values by stripping any leading minus sign.
+        super().__init__(val_a.strip("-"), val_b.strip("-"))
+
+
+class BignumAdd(BignumOperation):
+    """Test cases for bignum value addition."""
+    count = 0
+    symbol = "+"
+    test_function = "mpi_add_mpi"
+    test_name = "MPI add"
+    # Pairs drawn from mixed-sign values of different magnitudes.
+    input_cases = bignum_common.combination_pairs(
+        [
+            "1c67967269c6", "9cde3",
+            "-1c67967269c6", "-9cde3",
+        ]
+    )
+
+    def __init__(self, val_a: str, val_b: str) -> None:
+        super().__init__(val_a, val_b)
+        # Expected sum, computed with Python arbitrary-precision ints.
+        self._result = self.int_a + self.int_b
+
+    def description_suffix(self) -> str:
+        """Flag the result's sign when it is not obvious from the inputs."""
+        if (self.int_a >= 0 and self.int_b >= 0):
+            return "" # obviously positive result or 0
+        if (self.int_a <= 0 and self.int_b <= 0):
+            return "" # obviously negative result or 0
+        # The sign of the result is not obvious, so indicate it
+        return ", result{}0".format('>' if self._result > 0 else
+                                    '<' if self._result < 0 else '=')
+
+    def result(self) -> List[str]:
+        return [bignum_common.quote_str("{:x}".format(self._result))]
+
+if __name__ == '__main__':
+    # Use the section of the docstring relevant to the CLI as description
+    # (the first four lines of the module docstring).
+    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
diff --git a/tests/scripts/generate_ecp_tests.py b/tests/scripts/generate_ecp_tests.py
new file mode 100755
index 0000000..df1e469
--- /dev/null
+++ b/tests/scripts/generate_ecp_tests.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+"""Generate test data for ecp functions.
+
+The command line usage, class structure and available methods are the same
+as in generate_bignum_tests.py.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import sys
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import test_data_generation
+# Import modules containing additional test classes
+# Test function classes in these modules will be registered by
+# the framework
+from mbedtls_dev import ecp # pylint: disable=unused-import
+
+if __name__ == '__main__':
+    # Use the section of the docstring relevant to the CLI as description
+    # (the first four lines of the module docstring).
+    test_data_generation.main(sys.argv[1:], "\n".join(__doc__.splitlines()[:4]))
diff --git a/tests/scripts/generate_pkcs7_tests.py b/tests/scripts/generate_pkcs7_tests.py
new file mode 100755
index 0000000..0e484b0
--- /dev/null
+++ b/tests/scripts/generate_pkcs7_tests.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python3
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+"""
+Make fuzz like testing for pkcs7 tests
+Given a valid DER pkcs7 file add tests to the test_suite_pkcs7.data file
+ - It is expected that the pkcs7_asn1_fail( data_t *pkcs7_buf )
+ function is defined in test_suite_pkcs7.function
+ - This is not meant to be portable code, if anything it is meant to serve as
+ documentation for showing how those ugly tests in test_suite_pkcs7.data were created
+"""
+
+
+import sys
+from os.path import exists
+
+PKCS7_TEST_FILE = "../suites/test_suite_pkcs7.data"
+
+class Test: # pylint: disable=too-few-public-methods
+    """
+    An instance of a test in test_suite_pkcs7.data
+    """
+    def __init__(self, name, depends, func_call):
+        # name: test case title line; depends: "depends_on:..." line or None;
+        # func_call: the test function invocation line.
+        self.name = name
+        self.depends = depends
+        self.func_call = func_call
+
+    # pylint: disable=no-self-use
+    def to_string(self):
+        """Render the test in .data format, preceded by a blank line."""
+        # NOTE(review): raises TypeError when self.depends is None; only
+        # tests that carry a depends line can be rendered. In practice only
+        # newly generated tests (which always have one) are written out.
+        return "\n" + self.name + "\n" + self.depends + "\n" + self.func_call + "\n"
+
+class TestData:
+    """
+    Take in test_suite_pkcs7.data file.
+    Allow for new tests to be added.
+    """
+    # Dependency attached to every generated test case.
+    mandatory_dep = "MBEDTLS_MD_CAN_SHA256"
+    # Title prefix shared by all generated test cases.
+    test_name = "PKCS7 Parse Failure Invalid ASN1"
+    # Test function invoked by every generated case.
+    test_function = "pkcs7_asn1_fail:"
+    def __init__(self, file_name):
+        self.file_name = file_name
+        self.last_test_num, self.old_tests = self.read_test_file(file_name)
+        self.new_tests = []
+
+    # pylint: disable=no-self-use
+    def read_test_file(self, file):
+        """
+        Parse the test_suite_pkcs7.data file.
+
+        Returns (number of the last test, list of Test objects).
+        """
+        tests = []
+        if not exists(file):
+            print(file + " Does not exist")
+            sys.exit()
+        with open(file, "r", encoding='UTF-8') as fp:
+            data = fp.read()
+        lines = [line.strip() for line in data.split('\n') if len(line.strip()) > 1]
+        i = 0
+        while i < len(lines):
+            # A test occupies 2 lines (title, function call) or 3 lines when
+            # a "depends_on:" line sits between them.
+            if "depends" in lines[i+1]:
+                tests.append(Test(lines[i], lines[i+1], lines[i+2]))
+                i += 3
+            else:
+                tests.append(Test(lines[i], None, lines[i+1]))
+                i += 2
+        # Numbering continues from the "#N" suffix of the last test's title.
+        latest_test_num = float(tests[-1].name.split('#')[1])
+        return latest_test_num, tests
+
+    def add(self, name, func_call):
+        """Queue a new generated test case, numbered after the last one."""
+        self.last_test_num += 1
+        self.new_tests.append(Test(self.test_name + ": " + name + " #" + \
+                str(self.last_test_num), "depends_on:" + self.mandatory_dep, \
+                self.test_function + '"' + func_call + '"'))
+
+    def write_changes(self):
+        """Append all queued tests to the .data file."""
+        with open(self.file_name, 'a', encoding='UTF-8') as fw:
+            fw.write("\n")
+            for t in self.new_tests:
+                fw.write(t.to_string())
+
+
+def asn1_mutate(data):
+    """
+    We have been given an asn1 structure representing a pkcs7.
+    We want to return an array of slightly modified versions of this data
+    they should be modified in a way which makes the structure invalid
+
+    We know that asn1 structures are:
+    |---1 byte showing data type---|----byte(s) for length of data---|---data content--|
+    We know that some data types can contain other data types.
+    Return a list of (reason, mutated data) pairs.
+    """
+
+    # off the bat just add bytes to start and end of the buffer
+    mutations = []
+    reasons = []
+    mutations.append(["00"] + data)
+    reasons.append("Add null byte to start")
+    mutations.append(data + ["00"])
+    reasons.append("Add null byte to end")
+    # for every asn1 entry we should attempt to:
+    # - change the data type tag
+    # - make the length longer than actual
+    # - make the length shorter than actual
+    i = 0
+    while i < len(data):
+        tag_i = i
+        leng_i = tag_i + 1
+        # Length decoding: a first byte of "8x" marks DER long form, whose
+        # low nibble x is the number of following length bytes; otherwise
+        # the byte itself is the (short form) length.
+        # NOTE(review): assumes well-formed DER input with at most 0x8f-style
+        # long-form markers — TODO confirm for unusual encodings.
+        data_i = leng_i + 1 + (int(data[leng_i][1], 16) if data[leng_i][0] == '8' else 0)
+        if data[leng_i][0] == '8':
+            length = int(''.join(data[leng_i + 1: data_i]), 16)
+        else:
+            length = int(data[leng_i], 16)
+
+        tag = data[tag_i]
+        print("Looking at ans1: offset " + str(i) + " tag = " + tag + \
+                ", length = " + str(length)+ ":")
+        print(''.join(data[data_i:data_i+length]))
+        # change tag to something else
+        if tag == "02":
+            # turn integers into octet strings
+            new_tag = "04"
+        else:
+            # turn everything else into an integer
+            new_tag = "02"
+        mutations.append(data[:tag_i] + [new_tag] + data[leng_i:])
+        reasons.append("Change tag " + tag + " to " + new_tag)
+
+        # change lengths to too big
+        # skip any edge cases which would cause carry over
+        if int(data[data_i - 1], 16) < 255:
+            new_length = str(hex(int(data[data_i - 1], 16) + 1))[2:]
+            if len(new_length) == 1:
+                new_length = "0"+new_length
+            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
+            reasons.append("Change length from " + str(length) + " to " \
+                    + str(length + 1))
+            # we can add another test here for tags that contain other tags \
+            # where they have more data than their containing tags account for
+            if tag in ["30", "a0", "31"]:
+                mutations.append(data[:data_i -1] + [new_length] + \
+                        data[data_i:data_i + length] + ["00"] + \
+                        data[data_i + length:])
+                reasons.append("Change contents of tag " + tag + " to contain \
+                        one unaccounted extra byte")
+        # change lengths to too small
+        if int(data[data_i - 1], 16) > 0:
+            new_length = str(hex(int(data[data_i - 1], 16) - 1))[2:]
+            if len(new_length) == 1:
+                new_length = "0"+new_length
+            mutations.append(data[:data_i -1] + [new_length] + data[data_i:])
+            reasons.append("Change length from " + str(length) + " to " + str(length - 1))
+
+        # some tag types contain other tag types so we should iterate into the data
+        # (SEQUENCE 30, context tag a0, SET 31); other tags are skipped whole.
+        if tag in ["30", "a0", "31"]:
+            i = data_i
+        else:
+            i = data_i + length
+
+    return list(zip(reasons, mutations))
+
+if __name__ == "__main__":
+    if len(sys.argv) < 2:
+        print("USAGE: " + sys.argv[0] + " <pkcs7_der_file>")
+        sys.exit()
+
+    # Read the DER input and the existing .data file, mutate, then append.
+    DATA_FILE = sys.argv[1]
+    TEST_DATA = TestData(PKCS7_TEST_FILE)
+    with open(DATA_FILE, 'rb') as f:
+        DATA_STR = f.read().hex()
+    # make data an array of byte strings eg ['de','ad','be','ef']
+    HEX_DATA = list(map(''.join, [[DATA_STR[i], DATA_STR[i+1]] for i in range(0, len(DATA_STR), \
+            2)]))
+    # returns tuples of test_names and modified data buffers
+    MUT_ARR = asn1_mutate(HEX_DATA)
+
+    print("made " + str(len(MUT_ARR)) + " new tests")
+    for new_test in MUT_ARR:
+        TEST_DATA.add(new_test[0], ''.join(new_test[1]))
+
+    TEST_DATA.write_changes()
diff --git a/tests/scripts/generate_psa_tests.py b/tests/scripts/generate_psa_tests.py
new file mode 100755
index 0000000..fd278f8
--- /dev/null
+++ b/tests/scripts/generate_psa_tests.py
@@ -0,0 +1,850 @@
+#!/usr/bin/env python3
+"""Generate test data for PSA cryptographic mechanisms.
+
+With no arguments, generate all test data. With non-option arguments,
+generate only the specified files.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import enum
+import re
+import sys
+from typing import Callable, Dict, FrozenSet, Iterable, Iterator, List, Optional
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import crypto_data_tests
+from mbedtls_dev import crypto_knowledge
+from mbedtls_dev import macro_collector #pylint: disable=unused-import
+from mbedtls_dev import psa_information
+from mbedtls_dev import psa_storage
+from mbedtls_dev import test_case
+from mbedtls_dev import test_data_generation
+
+
+
+def test_case_for_key_type_not_supported(
+ verb: str, key_type: str, bits: int,
+ dependencies: List[str],
+ *args: str,
+ param_descr: str = ''
+) -> test_case.TestCase:
+ """Return one test case exercising a key creation method
+ for an unsupported key type or size.
+ """
+ psa_information.hack_dependencies_not_implemented(dependencies)
+ tc = test_case.TestCase()
+ short_key_type = crypto_knowledge.short_expression(key_type)
+ adverb = 'not' if dependencies else 'never'
+ if param_descr:
+ adverb = param_descr + ' ' + adverb
+ tc.set_description('PSA {} {} {}-bit {} supported'
+ .format(verb, short_key_type, bits, adverb))
+ tc.set_dependencies(dependencies)
+ tc.set_function(verb + '_not_supported')
+ tc.set_arguments([key_type] + list(args))
+ return tc
+
+class KeyTypeNotSupported:
+    """Generate test cases for when a key type is not supported."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        # PSA macro enumerator used to list key types, curves and DH groups.
+        self.constructors = info.constructors
+
+    # Key types that are enabled whenever PSA crypto itself is enabled, so a
+    # "not supported" case for them would be skipped in every configuration.
+    ALWAYS_SUPPORTED = frozenset([
+        'PSA_KEY_TYPE_DERIVE',
+        'PSA_KEY_TYPE_PASSWORD',
+        'PSA_KEY_TYPE_PASSWORD_HASH',
+        'PSA_KEY_TYPE_RAW_DATA',
+        'PSA_KEY_TYPE_HMAC'
+    ])
+    def test_cases_for_key_type_not_supported(
+            self,
+            kt: crypto_knowledge.KeyType,
+            param: Optional[int] = None,
+            param_descr: str = '',
+    ) -> Iterator[test_case.TestCase]:
+        """Return test cases exercising key creation when the given type is unsupported.
+
+        If param is present and not None, emit test cases conditioned on this
+        parameter not being supported. If it is absent or None, emit test cases
+        conditioned on the base type not being supported.
+        """
+        if kt.name in self.ALWAYS_SUPPORTED:
+            # Don't generate test cases for key types that are always supported.
+            # They would be skipped in all configurations, which is noise.
+            return
+        # Negate the dependency on the base type only when testing the base
+        # type itself (param is None); otherwise negate the selected parameter.
+        import_dependencies = [('!' if param is None else '') +
+                               psa_information.psa_want_symbol(kt.name)]
+        if kt.params is not None:
+            import_dependencies += [('!' if param == i else '') +
+                                    psa_information.psa_want_symbol(sym)
+                                    for i, sym in enumerate(kt.params)]
+        if kt.name.endswith('_PUBLIC_KEY'):
+            # Public keys are never generated, only imported.
+            generate_dependencies = []
+        else:
+            generate_dependencies = \
+                psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
+        import_dependencies = \
+            psa_information.fix_key_pair_dependencies(import_dependencies, 'BASIC')
+        for bits in kt.sizes_to_test():
+            yield test_case_for_key_type_not_supported(
+                'import', kt.expression, bits,
+                psa_information.finish_family_dependencies(import_dependencies, bits),
+                test_case.hex_string(kt.key_material(bits)),
+                param_descr=param_descr,
+            )
+            if not generate_dependencies and param is not None:
+                # If generation is impossible for this key type, rather than
+                # supported or not depending on implementation capabilities,
+                # only generate the test case once.
+                continue
+            # For public key we expect that key generation fails with
+            # INVALID_ARGUMENT. It is handled by KeyGenerate class.
+            if not kt.is_public():
+                yield test_case_for_key_type_not_supported(
+                    'generate', kt.expression, bits,
+                    psa_information.finish_family_dependencies(generate_dependencies, bits),
+                    str(bits),
+                    param_descr=param_descr,
+                )
+            # To be added: derive
+
+    # Parametrized key types, handled separately from plain key types below.
+    ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
+                     'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
+    DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
+                    'PSA_KEY_TYPE_DH_PUBLIC_KEY')
+
+    def test_cases_for_not_supported(self) -> Iterator[test_case.TestCase]:
+        """Generate test cases that exercise the creation of keys of unsupported types."""
+        for key_type in sorted(self.constructors.key_types):
+            if key_type in self.ECC_KEY_TYPES:
+                continue
+            if key_type in self.DH_KEY_TYPES:
+                continue
+            kt = crypto_knowledge.KeyType(key_type)
+            yield from self.test_cases_for_key_type_not_supported(kt)
+        # For ECC/DH, test both "whole type unsupported" and "only the
+        # curve/group parameter unsupported" (param index 0).
+        for curve_family in sorted(self.constructors.ecc_curves):
+            for constr in self.ECC_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [curve_family])
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, param_descr='type')
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, 0, param_descr='curve')
+        for dh_family in sorted(self.constructors.dh_groups):
+            for constr in self.DH_KEY_TYPES:
+                kt = crypto_knowledge.KeyType(constr, [dh_family])
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, param_descr='type')
+                yield from self.test_cases_for_key_type_not_supported(
+                    kt, 0, param_descr='group')
+
+def test_case_for_key_generation(
+ key_type: str, bits: int,
+ dependencies: List[str],
+ *args: str,
+ result: str = ''
+) -> test_case.TestCase:
+ """Return one test case exercising a key generation.
+ """
+ psa_information.hack_dependencies_not_implemented(dependencies)
+ tc = test_case.TestCase()
+ short_key_type = crypto_knowledge.short_expression(key_type)
+ tc.set_description('PSA {} {}-bit'
+ .format(short_key_type, bits))
+ tc.set_dependencies(dependencies)
+ tc.set_function('generate_key')
+ tc.set_arguments([key_type] + list(args) + [result])
+
+ return tc
+
+class KeyGenerate:
+ """Generate positive and negative (invalid argument) test cases for key generation."""
+
+ def __init__(self, info: psa_information.Information) -> None:
+ self.constructors = info.constructors
+
+ ECC_KEY_TYPES = ('PSA_KEY_TYPE_ECC_KEY_PAIR',
+ 'PSA_KEY_TYPE_ECC_PUBLIC_KEY')
+ DH_KEY_TYPES = ('PSA_KEY_TYPE_DH_KEY_PAIR',
+ 'PSA_KEY_TYPE_DH_PUBLIC_KEY')
+
+ @staticmethod
+ def test_cases_for_key_type_key_generation(
+ kt: crypto_knowledge.KeyType
+ ) -> Iterator[test_case.TestCase]:
+ """Return test cases exercising key generation.
+
+ All key types can be generated except for public keys. For public key
+ PSA_ERROR_INVALID_ARGUMENT status is expected.
+ """
+ result = 'PSA_SUCCESS'
+
+ import_dependencies = [psa_information.psa_want_symbol(kt.name)]
+ if kt.params is not None:
+ import_dependencies += [psa_information.psa_want_symbol(sym)
+ for i, sym in enumerate(kt.params)]
+ if kt.name.endswith('_PUBLIC_KEY'):
+ # The library checks whether the key type is a public key generically,
+ # before it reaches a point where it needs support for the specific key
+ # type, so it returns INVALID_ARGUMENT for unsupported public key types.
+ generate_dependencies = []
+ result = 'PSA_ERROR_INVALID_ARGUMENT'
+ else:
+ generate_dependencies = \
+ psa_information.fix_key_pair_dependencies(import_dependencies, 'GENERATE')
+ for bits in kt.sizes_to_test():
+ if kt.name == 'PSA_KEY_TYPE_RSA_KEY_PAIR':
+ size_dependency = "PSA_VENDOR_RSA_GENERATE_MIN_KEY_BITS <= " + str(bits)
+ test_dependencies = generate_dependencies + [size_dependency]
+ else:
+ test_dependencies = generate_dependencies
+ yield test_case_for_key_generation(
+ kt.expression, bits,
+ psa_information.finish_family_dependencies(test_dependencies, bits),
+ str(bits),
+ result
+ )
+
+ def test_cases_for_key_generation(self) -> Iterator[test_case.TestCase]:
+ """Generate test cases that exercise the generation of keys."""
+ for key_type in sorted(self.constructors.key_types):
+ if key_type in self.ECC_KEY_TYPES:
+ continue
+ if key_type in self.DH_KEY_TYPES:
+ continue
+ kt = crypto_knowledge.KeyType(key_type)
+ yield from self.test_cases_for_key_type_key_generation(kt)
+ for curve_family in sorted(self.constructors.ecc_curves):
+ for constr in self.ECC_KEY_TYPES:
+ kt = crypto_knowledge.KeyType(constr, [curve_family])
+ yield from self.test_cases_for_key_type_key_generation(kt)
+ for dh_family in sorted(self.constructors.dh_groups):
+ for constr in self.DH_KEY_TYPES:
+ kt = crypto_knowledge.KeyType(constr, [dh_family])
+ yield from self.test_cases_for_key_type_key_generation(kt)
+
+class OpFail:
+    """Generate test cases for operations that must fail."""
+    #pylint: disable=too-few-public-methods
+
+    class Reason(enum.Enum):
+        # Why the operation is expected to fail.
+        NOT_SUPPORTED = 0   # an algorithm dependency is disabled in the build
+        INVALID = 1         # algorithm cannot do this category of operation
+        INCOMPATIBLE = 2    # key type cannot be used with this algorithm
+        PUBLIC = 3          # private-key operation attempted with a public key
+
+    def __init__(self, info: psa_information.Information) -> None:
+        self.constructors = info.constructors
+        key_type_expressions = self.constructors.generate_expressions(
+            sorted(self.constructors.key_types)
+        )
+        # All key types, paired with every algorithm in one_key_test_cases().
+        self.key_types = [crypto_knowledge.KeyType(kt_expr)
+                          for kt_expr in key_type_expressions]
+
+    def make_test_case(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+            reason: 'Reason',
+            kt: Optional[crypto_knowledge.KeyType] = None,
+            not_deps: FrozenSet[str] = frozenset(),
+    ) -> test_case.TestCase:
+        """Construct a failure test case for a one-key or keyless operation.
+
+        * `reason`: why the operation must fail (drives description and
+          expected error code).
+        * `kt`: the key type to use, or None for a keyless operation.
+        * `not_deps`: dependencies that must be disabled for this case.
+        """
+        #pylint: disable=too-many-arguments,too-many-locals
+        tc = test_case.TestCase()
+        pretty_alg = alg.short_expression()
+        if reason == self.Reason.NOT_SUPPORTED:
+            # Name the disabled dependencies in the test description.
+            short_deps = [re.sub(r'PSA_WANT_ALG_', r'', dep)
+                          for dep in not_deps]
+            pretty_reason = '!' + '&'.join(sorted(short_deps))
+        else:
+            pretty_reason = reason.name.lower()
+        if kt:
+            key_type = kt.expression
+            pretty_type = kt.short_expression()
+        else:
+            key_type = ''
+            pretty_type = ''
+        tc.set_description('PSA {} {}: {}{}'
+                           .format(category.name.lower(),
+                                   pretty_alg,
+                                   pretty_reason,
+                                   ' with ' + pretty_type if pretty_type else ''))
+        dependencies = psa_information.automatic_dependencies(alg.base_expression, key_type)
+        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
+        # Negate the dependencies that must be disabled for this case.
+        for i, dep in enumerate(dependencies):
+            if dep in not_deps:
+                dependencies[i] = '!' + dep
+        tc.set_dependencies(dependencies)
+        tc.set_function(category.name.lower() + '_fail')
+        arguments = [] # type: List[str]
+        if kt:
+            key_material = kt.key_material(kt.sizes_to_test()[0])
+            arguments += [key_type, test_case.hex_string(key_material)]
+        arguments.append(alg.expression)
+        if category.is_asymmetric():
+            # Flag argument: 1 = exercise the public-key side, 0 = private.
+            arguments.append('1' if reason == self.Reason.PUBLIC else '0')
+        error = ('NOT_SUPPORTED' if reason == self.Reason.NOT_SUPPORTED else
+                 'INVALID_ARGUMENT')
+        arguments.append('PSA_ERROR_' + error)
+        tc.set_arguments(arguments)
+        return tc
+
+    def no_key_test_cases(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate failure test cases for keyless operations with the specified algorithm."""
+        if alg.can_do(category):
+            # Compatible operation, unsupported algorithm
+            for dep in psa_information.automatic_dependencies(alg.base_expression):
+                yield self.make_test_case(alg, category,
+                                          self.Reason.NOT_SUPPORTED,
+                                          not_deps=frozenset([dep]))
+        else:
+            # Incompatible operation, supported algorithm
+            yield self.make_test_case(alg, category, self.Reason.INVALID)
+
+    def one_key_test_cases(
+            self,
+            alg: crypto_knowledge.Algorithm,
+            category: crypto_knowledge.AlgorithmCategory,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate failure test cases for one-key operations with the specified algorithm."""
+        for kt in self.key_types:
+            key_is_compatible = kt.can_do(alg)
+            if key_is_compatible and alg.can_do(category):
+                # Compatible key and operation, unsupported algorithm
+                for dep in psa_information.automatic_dependencies(alg.base_expression):
+                    yield self.make_test_case(alg, category,
+                                              self.Reason.NOT_SUPPORTED,
+                                              kt=kt, not_deps=frozenset([dep]))
+                # Public key for a private-key operation
+                if category.is_asymmetric() and kt.is_public():
+                    yield self.make_test_case(alg, category,
+                                              self.Reason.PUBLIC,
+                                              kt=kt)
+            elif key_is_compatible:
+                # Compatible key, incompatible operation, supported algorithm
+                yield self.make_test_case(alg, category,
+                                          self.Reason.INVALID,
+                                          kt=kt)
+            elif alg.can_do(category):
+                # Incompatible key, compatible operation, supported algorithm
+                yield self.make_test_case(alg, category,
+                                          self.Reason.INCOMPATIBLE,
+                                          kt=kt)
+            else:
+                # Incompatible key and operation. Don't test cases where
+                # multiple things are wrong, to keep the number of test
+                # cases reasonable.
+                pass
+
+    def test_cases_for_algorithm(
+            self,
+            alg: crypto_knowledge.Algorithm,
+    ) -> Iterator[test_case.TestCase]:
+        """Generate operation failure test cases for the specified algorithm."""
+        for category in crypto_knowledge.AlgorithmCategory:
+            if category == crypto_knowledge.AlgorithmCategory.PAKE:
+                # PAKE operations are not implemented yet
+                pass
+            elif category.requires_key():
+                yield from self.one_key_test_cases(alg, category)
+            else:
+                yield from self.no_key_test_cases(alg, category)
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Generate all test cases for operations that must fail."""
+        algorithms = sorted(self.constructors.algorithms)
+        for expr in self.constructors.generate_expressions(algorithms):
+            alg = crypto_knowledge.Algorithm(expr)
+            yield from self.test_cases_for_algorithm(alg)
+
+
+class StorageKey(psa_storage.Key):
+    """Representation of a key for storage format testing."""
+
+    IMPLICIT_USAGE_FLAGS = {
+        'PSA_KEY_USAGE_SIGN_HASH': 'PSA_KEY_USAGE_SIGN_MESSAGE',
+        'PSA_KEY_USAGE_VERIFY_HASH': 'PSA_KEY_USAGE_VERIFY_MESSAGE'
+    } #type: Dict[str, str]
+    """Mapping of usage flags to the flags that they imply."""
+
+    def __init__(
+            self,
+            usage: Iterable[str],
+            without_implicit_usage: Optional[bool] = False,
+            **kwargs
+    ) -> None:
+        """Prepare to generate a key.
+
+        * `usage`                 : The usage flags used for the key.
+        * `without_implicit_usage`: Flag to define to apply the usage extension
+
+        Remaining keyword arguments are forwarded to `psa_storage.Key`.
+        """
+        usage_flags = set(usage)
+        if not without_implicit_usage:
+            # Extend the usage set with the flags each given flag implies.
+            for flag in sorted(usage_flags):
+                if flag in self.IMPLICIT_USAGE_FLAGS:
+                    usage_flags.add(self.IMPLICIT_USAGE_FLAGS[flag])
+        # Render the flags as a C expression; '0' means no usage at all.
+        if usage_flags:
+            usage_expression = ' | '.join(sorted(usage_flags))
+        else:
+            usage_expression = '0'
+        super().__init__(usage=usage_expression, **kwargs)
+
+class StorageTestData(StorageKey):
+    """Representation of test case data for storage format testing."""
+
+    def __init__(
+            self,
+            description: str,
+            expected_usage: Optional[List[str]] = None,
+            **kwargs
+    ) -> None:
+        """Prepare to generate test data
+
+        * `description`   : used for the test case names
+        * `expected_usage`: the usage flags generated as the expected usage flags
+                            in the test cases. Can differ from the usage flags
+                            stored in the keys because of the usage flags extension.
+        """
+        super().__init__(**kwargs)
+        self.description = description #type: str
+        # None -> expect exactly the stored usage; [] -> expect no usage (0);
+        # otherwise expect the explicitly provided flag list.
+        if expected_usage is None:
+            self.expected_usage = self.usage #type: psa_storage.Expr
+        elif expected_usage:
+            self.expected_usage = psa_storage.Expr(' | '.join(expected_usage))
+        else:
+            self.expected_usage = psa_storage.Expr(0)
+
+class StorageFormat:
+    """Storage format stability test cases."""
+
+    def __init__(self, info: psa_information.Information, version: int, forward: bool) -> None:
+        """Prepare to generate test cases for storage format stability.
+
+        * `info`: information about the API. See the `Information` class.
+        * `version`: the storage format version to generate test cases for.
+        * `forward`: if true, generate forward compatibility test cases which
+          save a key and check that its representation is as intended. Otherwise
+          generate backward compatibility test cases which inject a key
+          representation and check that it can be read and used.
+        """
+        self.constructors = info.constructors #type: macro_collector.PSAMacroEnumerator
+        self.version = version #type: int
+        self.forward = forward #type: bool
+
+    RSA_OAEP_RE = re.compile(r'PSA_ALG_RSA_OAEP\((.*)\)\Z')
+    BRAINPOOL_RE = re.compile(r'PSA_KEY_TYPE_\w+\(PSA_ECC_FAMILY_BRAINPOOL_\w+\)\Z')
+    @classmethod
+    def exercise_key_with_algorithm(
+            cls,
+            key_type: psa_storage.Expr, bits: int,
+            alg: psa_storage.Expr
+    ) -> bool:
+        """Whether to exercise the given key with the given algorithm.
+
+        Normally only the type and algorithm matter for compatibility, and
+        this is handled in crypto_knowledge.KeyType.can_do(). This function
+        exists to detect exceptional cases. Exceptional cases detected here
+        are not tested in OpFail and should therefore have manually written
+        test cases.
+        """
+        # Some test keys have the RAW_DATA type and attributes that don't
+        # necessarily make sense. We do this to validate numerical
+        # encodings of the attributes.
+        # Raw data keys have no useful exercise anyway so there is no
+        # loss of test coverage.
+        if key_type.string == 'PSA_KEY_TYPE_RAW_DATA':
+            return False
+        # OAEP requires room for two hashes plus wrapping
+        m = cls.RSA_OAEP_RE.match(alg.string)
+        if m:
+            hash_alg = m.group(1)
+            hash_length = crypto_knowledge.Algorithm.hash_length(hash_alg)
+            key_length = (bits + 7) // 8
+            # Leave enough room for at least one byte of plaintext
+            return key_length > 2 * hash_length + 2
+        # There's nothing wrong with ECC keys on Brainpool curves,
+        # but operations with them are very slow. So we only exercise them
+        # with a single algorithm, not with all possible hashes. We do
+        # exercise other curves with all algorithms so test coverage is
+        # perfectly adequate like this.
+        m = cls.BRAINPOOL_RE.match(key_type.string)
+        if m and alg.string != 'PSA_ALG_ECDSA_ANY':
+            return False
+        return True
+
+    def make_test_case(self, key: StorageTestData) -> test_case.TestCase:
+        """Construct a storage format test case for the given key.
+
+        If ``forward`` is true, generate a forward compatibility test case:
+        create a key and validate that it has the expected representation.
+        Otherwise generate a backward compatibility test case: inject the
+        key representation into storage and validate that it can be read
+        correctly.
+        """
+        verb = 'save' if self.forward else 'read'
+        tc = test_case.TestCase()
+        tc.set_description(verb + ' ' + key.description)
+        dependencies = psa_information.automatic_dependencies(
+            key.lifetime.string, key.type.string,
+            key.alg.string, key.alg2.string,
+        )
+        dependencies = psa_information.finish_family_dependencies(dependencies, key.bits)
+        dependencies += psa_information.generate_deps_from_description(key.description)
+        dependencies = psa_information.fix_key_pair_dependencies(dependencies, 'BASIC')
+        tc.set_dependencies(dependencies)
+        tc.set_function('key_storage_' + verb)
+        if self.forward:
+            extra_arguments = []
+        else:
+            # Backward-compatibility reads take an extra flags argument.
+            flags = []
+            if self.exercise_key_with_algorithm(key.type, key.bits, key.alg):
+                flags.append('TEST_FLAG_EXERCISE')
+            if 'READ_ONLY' in key.lifetime.string:
+                flags.append('TEST_FLAG_READ_ONLY')
+            extra_arguments = [' | '.join(flags) if flags else '0']
+        tc.set_arguments([key.lifetime.string,
+                          key.type.string, str(key.bits),
+                          key.expected_usage.string,
+                          key.alg.string, key.alg2.string,
+                          '"' + key.material.hex() + '"',
+                          '"' + key.hex() + '"',
+                          *extra_arguments])
+        return tc
+
+    def key_for_lifetime(
+            self,
+            lifetime: str,
+    ) -> StorageTestData:
+        """Construct a test key for the given lifetime."""
+        short = lifetime
+        short = re.sub(r'PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION',
+                       r'', short)
+        short = crypto_knowledge.short_expression(short)
+        description = 'lifetime: ' + short
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=lifetime,
+                              type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                              usage=['PSA_KEY_USAGE_EXPORT'], alg=0, alg2=0,
+                              material=b'L',
+                              description=description)
+        return key
+
+    def all_keys_for_lifetimes(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering lifetimes."""
+        lifetimes = sorted(self.constructors.lifetimes)
+        expressions = self.constructors.generate_expressions(lifetimes)
+        for lifetime in expressions:
+            # Don't attempt to create or load a volatile key in storage
+            if 'VOLATILE' in lifetime:
+                continue
+            # Don't attempt to create a read-only key in storage,
+            # but do attempt to load one.
+            if 'READ_ONLY' in lifetime and self.forward:
+                continue
+            yield self.key_for_lifetime(lifetime)
+
+    def key_for_usage_flags(
+            self,
+            usage_flags: List[str],
+            short: Optional[str] = None,
+            test_implicit_usage: Optional[bool] = True
+    ) -> StorageTestData:
+        """Construct a test key for the given key usage.
+
+        * `short`: optional human-readable summary of the flags for the
+          description; derived from the expected usage if omitted.
+        """
+        extra_desc = ' without implication' if test_implicit_usage else ''
+        description = 'usage' + extra_desc + ': '
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               expected_usage=usage_flags,
+                               without_implicit_usage=not test_implicit_usage,
+                               usage=usage_flags, alg=0, alg2=0,
+                               material=b'K',
+                               description=description)
+        if short is None:
+            usage_expr = key1.expected_usage.string
+            key1.description += crypto_knowledge.short_expression(usage_expr)
+        else:
+            key1.description += short
+        return key1
+
+    def generate_keys_for_usage_flags(self, **kwargs) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield self.key_for_usage_flags(['0'], **kwargs)
+        for usage_flag in known_flags:
+            yield self.key_for_usage_flags([usage_flag], **kwargs)
+        # Also cover each adjacent pair of flags (wrapping around), so that
+        # every flag appears at least once in a combination.
+        for flag1, flag2 in zip(known_flags,
+                                known_flags[1:] + [known_flags[0]]):
+            yield self.key_for_usage_flags([flag1, flag2], **kwargs)
+
+    def generate_key_for_all_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate one test key with every known usage flag set."""
+        known_flags = sorted(self.constructors.key_usage_flags)
+        yield self.key_for_usage_flags(known_flags, short='all known')
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flag encodings."""
+        yield from self.generate_keys_for_usage_flags()
+        yield from self.generate_key_for_all_usage_flags()
+
+    def key_for_type_and_alg(
+            self,
+            kt: crypto_knowledge.KeyType,
+            bits: int,
+            alg: Optional[crypto_knowledge.Algorithm] = None,
+    ) -> StorageTestData:
+        """Construct a test key of the given type.
+
+        If alg is not None, this key allows it.
+        """
+        usage_flags = ['PSA_KEY_USAGE_EXPORT']
+        alg1 = 0 #type: psa_storage.Exprable
+        alg2 = 0
+        if alg is not None:
+            alg1 = alg.expression
+            usage_flags += alg.usage_flags(public=kt.is_public())
+        key_material = kt.key_material(bits)
+        description = 'type: {} {}-bit'.format(kt.short_expression(1), bits)
+        if alg is not None:
+            description += ', ' + alg.short_expression(1)
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=0x00000001,
+                              type=kt.expression, bits=bits,
+                              usage=usage_flags, alg=alg1, alg2=alg2,
+                              material=key_material,
+                              description=description)
+        return key
+
+    def keys_for_type(
+            self,
+            key_type: str,
+            all_algorithms: List[crypto_knowledge.Algorithm],
+    ) -> Iterator[StorageTestData]:
+        """Generate test keys for the given key type."""
+        kt = crypto_knowledge.KeyType(key_type)
+        for bits in kt.sizes_to_test():
+            # Test a non-exercisable key, as well as exercisable keys for
+            # each compatible algorithm.
+            # To do: test reading a key from storage with an incompatible
+            # or unsupported algorithm.
+            yield self.key_for_type_and_alg(kt, bits)
+            compatible_algorithms = [alg for alg in all_algorithms
+                                     if kt.can_do(alg)]
+            for alg in compatible_algorithms:
+                yield self.key_for_type_and_alg(kt, bits, alg)
+
+    def all_keys_for_types(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering key types and their representations."""
+        key_types = sorted(self.constructors.key_types)
+        all_algorithms = [crypto_knowledge.Algorithm(alg)
+                          for alg in self.constructors.generate_expressions(
+                              sorted(self.constructors.algorithms)
+                          )]
+        for key_type in self.constructors.generate_expressions(key_types):
+            yield from self.keys_for_type(key_type, all_algorithms)
+
+    def keys_for_algorithm(self, alg: str) -> Iterator[StorageTestData]:
+        """Generate test keys for the encoding of the specified algorithm."""
+        # These test cases only validate the encoding of algorithms, not
+        # whether the key read from storage is suitable for an operation.
+        # `keys_for_types` generate read tests with an algorithm and a
+        # compatible key.
+        descr = crypto_knowledge.short_expression(alg, 1)
+        usage = ['PSA_KEY_USAGE_EXPORT']
+        key1 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=alg, alg2=0,
+                               material=b'K',
+                               description='alg: ' + descr)
+        yield key1
+        key2 = StorageTestData(version=self.version,
+                               id=1, lifetime=0x00000001,
+                               type='PSA_KEY_TYPE_RAW_DATA', bits=8,
+                               usage=usage, alg=0, alg2=alg,
+                               material=b'L',
+                               description='alg2: ' + descr)
+        yield key2
+
+    def all_keys_for_algorithms(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering algorithm encodings."""
+        algorithms = sorted(self.constructors.algorithms)
+        for alg in self.constructors.generate_expressions(algorithms):
+            yield from self.keys_for_algorithm(alg)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        """Generate all keys for the test cases."""
+        yield from self.all_keys_for_lifetimes()
+        yield from self.all_keys_for_usage_flags()
+        yield from self.all_keys_for_types()
+        yield from self.all_keys_for_algorithms()
+
+    def all_test_cases(self) -> Iterator[test_case.TestCase]:
+        """Generate all storage format test cases."""
+        # First build a list of all keys, then construct all the corresponding
+        # test cases. This allows all required information to be obtained in
+        # one go, which is a significant performance gain as the information
+        # includes numerical values obtained by compiling a C program.
+        all_keys = list(self.generate_all_keys())
+        for key in all_keys:
+            if key.location_value() != 0:
+                # Skip keys with a non-default location, because they
+                # require a driver and we currently have no mechanism to
+                # determine whether a driver is available.
+                continue
+            yield self.make_test_case(key)
+
+class StorageFormatForward(StorageFormat):
+    """Storage format stability test cases for forward compatibility."""
+
+    def __init__(self, info: psa_information.Information, version: int) -> None:
+        # forward=True: save keys and check their stored representation.
+        super().__init__(info, version, True)
+
+class StorageFormatV0(StorageFormat):
+    """Storage format stability test cases for version 0 compatibility."""
+
+    def __init__(self, info: psa_information.Information) -> None:
+        # version=0, forward=False: inject v0 representations and read them.
+        super().__init__(info, 0, False)
+
+    def all_keys_for_usage_flags(self) -> Iterator[StorageTestData]:
+        """Generate test keys covering usage flags."""
+        yield from super().all_keys_for_usage_flags()
+        # Additionally cover keys stored without the implicit usage extension.
+        yield from self.generate_keys_for_usage_flags(test_implicit_usage=False)
+
+    def keys_for_implicit_usage(
+            self,
+            implyer_usage: str,
+            alg: str,
+            key_type: crypto_knowledge.KeyType
+    ) -> StorageTestData:
+        # pylint: disable=too-many-locals
+        """Generate test keys for the specified implicit usage flag,
+        algorithm and key type combination.
+        """
+        bits = key_type.sizes_to_test()[0]
+        implicit_usage = StorageKey.IMPLICIT_USAGE_FLAGS[implyer_usage]
+        usage_flags = ['PSA_KEY_USAGE_EXPORT']
+        # The key is stored with only the implying flag; reading it back is
+        # expected to add the implied flag.
+        material_usage_flags = usage_flags + [implyer_usage]
+        expected_usage_flags = material_usage_flags + [implicit_usage]
+        alg2 = 0
+        key_material = key_type.key_material(bits)
+        usage_expression = crypto_knowledge.short_expression(implyer_usage, 1)
+        alg_expression = crypto_knowledge.short_expression(alg, 1)
+        key_type_expression = key_type.short_expression(1)
+        description = 'implied by {}: {} {} {}-bit'.format(
+            usage_expression, alg_expression, key_type_expression, bits)
+        key = StorageTestData(version=self.version,
+                              id=1, lifetime=0x00000001,
+                              type=key_type.expression, bits=bits,
+                              usage=material_usage_flags,
+                              expected_usage=expected_usage_flags,
+                              without_implicit_usage=True,
+                              alg=alg, alg2=alg2,
+                              material=key_material,
+                              description=description)
+        return key
+
+    def gather_key_types_for_sign_alg(self) -> Dict[str, List[str]]:
+        # pylint: disable=too-many-locals
+        """Match possible key types for sign algorithms."""
+        # To create a valid combination both the algorithms and key types
+        # must be filtered. Pair them with keywords created from its names.
+        incompatible_alg_keyword = frozenset(['RAW', 'ANY', 'PURE'])
+        incompatible_key_type_keywords = frozenset(['MONTGOMERY'])
+        keyword_translation = {
+            'ECDSA': 'ECC',
+            'ED[0-9]*.*' : 'EDWARDS'
+        }
+        exclusive_keywords = {
+            'EDWARDS': 'ECC'
+        }
+        key_types = set(self.constructors.generate_expressions(self.constructors.key_types))
+        algorithms = set(self.constructors.generate_expressions(self.constructors.sign_algorithms))
+        alg_with_keys = {} #type: Dict[str, List[str]]
+        # Strip parentheses so parametrized type names split cleanly on '_'.
+        translation_table = str.maketrans('(', '_', ')')
+        for alg in algorithms:
+            # Generate keywords from the name of the algorithm
+            alg_keywords = set(alg.partition('(')[0].split(sep='_')[2:])
+            # Translate keywords for better matching with the key types
+            for keyword in alg_keywords.copy():
+                for pattern, replace in keyword_translation.items():
+                    if re.match(pattern, keyword):
+                        alg_keywords.remove(keyword)
+                        alg_keywords.add(replace)
+            # Filter out incompatible algorithms
+            if not alg_keywords.isdisjoint(incompatible_alg_keyword):
+                continue
+
+            for key_type in key_types:
+                # Generate keywords from the name of the key type
+                key_type_keywords = set(key_type.translate(translation_table).split(sep='_')[3:])
+
+                # Remove ambiguous keywords
+                for keyword1, keyword2 in exclusive_keywords.items():
+                    if keyword1 in key_type_keywords:
+                        key_type_keywords.remove(keyword2)
+
+                if key_type_keywords.isdisjoint(incompatible_key_type_keywords) and\
+                   not key_type_keywords.isdisjoint(alg_keywords):
+                    if alg in alg_with_keys:
+                        alg_with_keys[alg].append(key_type)
+                    else:
+                        alg_with_keys[alg] = [key_type]
+        return alg_with_keys
+
+    def all_keys_for_implicit_usage(self) -> Iterator[StorageTestData]:
+        """Generate test keys for usage flag extensions."""
+        # Generate a key type and algorithm pair for each extendable usage
+        # flag to generate a valid key for exercising. The key is generated
+        # without usage extension to check the extension compatibility.
+        alg_with_keys = self.gather_key_types_for_sign_alg()
+
+        for usage in sorted(StorageKey.IMPLICIT_USAGE_FLAGS, key=str):
+            for alg in sorted(alg_with_keys):
+                for key_type in sorted(alg_with_keys[alg]):
+                    # The key types must be filtered to fit the specific usage flag.
+                    kt = crypto_knowledge.KeyType(key_type)
+                    if kt.is_public() and '_SIGN_' in usage:
+                        # Can't sign with a public key
+                        continue
+                    yield self.keys_for_implicit_usage(usage, alg, kt)
+
+    def generate_all_keys(self) -> Iterator[StorageTestData]:
+        """Generate all v0 keys: the common set plus implicit-usage keys."""
+        yield from super().generate_all_keys()
+        yield from self.all_keys_for_implicit_usage()
+
+
+class PSATestGenerator(test_data_generation.TestGenerator):
+    """Test generator subclass including PSA targets and info."""
+    # Note that targets whose names contain 'test_format' have their content
+    # validated by `abi_check.py`.
+    targets = {
+        'test_suite_psa_crypto_generate_key.generated':
+        lambda info: KeyGenerate(info).test_cases_for_key_generation(),
+        'test_suite_psa_crypto_not_supported.generated':
+        lambda info: KeyTypeNotSupported(info).test_cases_for_not_supported(),
+        'test_suite_psa_crypto_low_hash.generated':
+        lambda info: crypto_data_tests.HashPSALowLevel(info).all_test_cases(),
+        'test_suite_psa_crypto_op_fail.generated':
+        lambda info: OpFail(info).all_test_cases(),
+        'test_suite_psa_crypto_storage_format.current':
+        lambda info: StorageFormatForward(info, 0).all_test_cases(),
+        'test_suite_psa_crypto_storage_format.v0':
+        lambda info: StorageFormatV0(info).all_test_cases(),
+    } #type: Dict[str, Callable[[psa_information.Information], Iterable[test_case.TestCase]]]
+
+    def __init__(self, options):
+        super().__init__(options)
+        # Gather the PSA API information once; it is reused by every target.
+        self.info = psa_information.Information()
+
+    def generate_target(self, name: str, *target_args) -> None:
+        # Ignore any positional target_args; always pass the cached PSA info.
+        super().generate_target(name, self.info)
+
+
+if __name__ == '__main__':
+    # Delegate option parsing and file generation to the shared driver.
+    test_data_generation.main(sys.argv[1:], __doc__, PSATestGenerator)
diff --git a/tests/scripts/generate_psa_wrappers.py b/tests/scripts/generate_psa_wrappers.py
new file mode 100755
index 0000000..07d1450
--- /dev/null
+++ b/tests/scripts/generate_psa_wrappers.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python3
+"""Generate wrapper functions for PSA function calls.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+### WARNING: the code in this file has not been extensively reviewed yet.
+### We do not think it is harmful, but it may be below our normal standards
+### for robustness and maintainability.
+
+import argparse
+import itertools
+import os
+from typing import Iterator, List, Optional, Tuple
+
+import scripts_path #pylint: disable=unused-import
+from mbedtls_dev import build_tree
+from mbedtls_dev import c_parsing_helper
+from mbedtls_dev import c_wrapper_generator
+from mbedtls_dev import typing_util
+
+
class BufferParameter:
    """Description of an input or output buffer parameter sequence to a PSA function."""
    #pylint: disable=too-few-public-methods

    def __init__(self, i: int, is_output: bool,
                 buffer_name: str, size_name: str) -> None:
        """Record the position and names of a (pointer, size) argument pair.

        i is the index of the function argument that is the pointer to the
        buffer; the size is argument i+1. For a variable-size output, the
        actual length goes in argument i+2, which this class does not yet
        help with. buffer_name and size_name are the names of arguments
        i and i+1.
        """
        # Same attributes and values as before, assigned in a different order.
        self.is_output = is_output
        self.size_name = size_name
        self.buffer_name = buffer_name
        self.index = i
+
+
class PSAWrapperGenerator(c_wrapper_generator.Base):
    """Generate a C source file containing wrapper functions for PSA Crypto API calls."""

    # Preprocessor condition wrapped around the whole generated file
    # (opened in _write_prologue, closed in _write_epilogue).
    _CPP_GUARDS = ('defined(MBEDTLS_PSA_CRYPTO_C) && ' +
                   'defined(MBEDTLS_TEST_HOOKS) && \\\n    ' +
                   '!defined(RECORD_PSA_STATUS_COVERAGE_LOG)')
    # Generated wrappers are named mbedtls_test_wrap_<psa_function_name>.
    _WRAPPER_NAME_PREFIX = 'mbedtls_test_wrap_'
    _WRAPPER_NAME_SUFFIX = ''

    def gather_data(self) -> None:
        """Parse the PSA Crypto API headers and record their function declarations."""
        root_dir = build_tree.guess_mbedtls_root()
        for header_name in ['crypto.h', 'crypto_extra.h']:
            header_path = os.path.join(root_dir, 'include', 'psa', header_name)
            c_parsing_helper.read_function_declarations(self.functions, header_path)

    # Declared functions that must not get a wrapper (see reasons inline).
    _SKIP_FUNCTIONS = frozenset([
        'mbedtls_psa_external_get_random', # not a library function
        'psa_get_key_domain_parameters', # client-side function
        'psa_get_key_slot_number', # client-side function
        'psa_key_derivation_verify_bytes', # not implemented yet
        'psa_key_derivation_verify_key', # not implemented yet
        'psa_set_key_domain_parameters', # client-side function
    ])

    def _skip_function(self, function: c_wrapper_generator.FunctionInfo) -> bool:
        """Whether to omit a wrapper: only psa_status_t-returning functions
        outside _SKIP_FUNCTIONS are wrapped."""
        if function.return_type != 'psa_status_t':
            return True
        if function.name in self._SKIP_FUNCTIONS:
            return True
        return False

    # PAKE stuff: not implemented yet
    _PAKE_STUFF = frozenset([
        'psa_crypto_driver_pake_inputs_t *',
        'psa_pake_cipher_suite_t *',
    ])

    def _return_variable_name(self,
                              function: c_wrapper_generator.FunctionInfo) -> str:
        """The name of the variable that will contain the return value."""
        if function.return_type == 'psa_status_t':
            return 'status'
        return super()._return_variable_name(function)

    # Per-function preprocessor guards, extending the base class's table.
    _FUNCTION_GUARDS = c_wrapper_generator.Base._FUNCTION_GUARDS.copy() \
        #pylint: disable=protected-access
    _FUNCTION_GUARDS.update({
        'mbedtls_psa_register_se_key': 'defined(MBEDTLS_PSA_CRYPTO_SE_C)',
        'mbedtls_psa_inject_entropy': 'defined(MBEDTLS_PSA_INJECT_ENTROPY)',
        'mbedtls_psa_external_get_random': 'defined(MBEDTLS_PSA_CRYPTO_EXTERNAL_RNG)',
        'mbedtls_psa_platform_get_builtin_key': 'defined(MBEDTLS_PSA_CRYPTO_BUILTIN_KEYS)',
    })

    @staticmethod
    def _detect_buffer_parameters(arguments: List[c_parsing_helper.ArgumentInfo],
                                  argument_names: List[str]) -> Iterator[BufferParameter]:
        """Detect function arguments that are buffers (pointer, size [,length])."""
        # Array-typed arguments (non-empty suffix) can never start a buffer pair.
        types = ['' if arg.suffix else arg.type for arg in arguments]
        # pairs = list of (type_of_arg_N, type_of_arg_N+1)
        # where each type_of_arg_X is the empty string if the type is an array
        # or there is no argument X.
        pairs = enumerate(itertools.zip_longest(types, types[1:], fillvalue=''))
        for i, t01 in pairs:
            if (t01[0] == 'const uint8_t *' or t01[0] == 'uint8_t *') and \
               t01[1] == 'size_t':
                # A non-const pointer indicates an output buffer.
                yield BufferParameter(i, not t01[0].startswith('const '),
                                      argument_names[i], argument_names[i+1])

    @staticmethod
    def _write_poison_buffer_parameter(out: typing_util.Writable,
                                       param: BufferParameter,
                                       poison: bool) -> None:
        """Write poisoning or unpoisoning code for a buffer parameter.

        Write poisoning code if poison is true, unpoisoning code otherwise.
        """
        out.write('    MBEDTLS_TEST_MEMORY_{}({}, {});\n'.format(
            'POISON' if poison else 'UNPOISON',
            param.buffer_name, param.size_name
        ))

    def _write_poison_buffer_parameters(self, out: typing_util.Writable,
                                        buffer_parameters: List[BufferParameter],
                                        poison: bool) -> None:
        """Write poisoning or unpoisoning code for the buffer parameters.

        Write poisoning code if poison is true, unpoisoning code otherwise.
        """
        if not buffer_parameters:
            return
        # Poisoning only makes sense when the library must tolerate
        # shared/overlapping buffers.
        out.write('#if !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS)\n')
        for param in buffer_parameters:
            self._write_poison_buffer_parameter(out, param, poison)
        out.write('#endif /* !defined(MBEDTLS_PSA_ASSUME_EXCLUSIVE_BUFFERS) */\n')

    @staticmethod
    def _parameter_should_be_copied(function_name: str,
                                    _buffer_name: Optional[str]) -> bool:
        """Whether the specified buffer argument to a PSA function should be copied.
        """
        # False-positives that do not need buffer copying
        if function_name in ('mbedtls_psa_inject_entropy',
                             'psa_crypto_driver_pake_get_password',
                             'psa_crypto_driver_pake_get_user',
                             'psa_crypto_driver_pake_get_peer'):
            return False

        return True

    def _write_function_call(self, out: typing_util.Writable,
                             function: c_wrapper_generator.FunctionInfo,
                             argument_names: List[str]) -> None:
        """Emit the wrapped call, poisoning detected buffer arguments before
        it and unpoisoning them afterwards."""
        buffer_parameters = list(
            param
            for param in self._detect_buffer_parameters(function.arguments,
                                                        argument_names)
            if self._parameter_should_be_copied(function.name,
                                                function.arguments[param.index].name))
        self._write_poison_buffer_parameters(out, buffer_parameters, True)
        super()._write_function_call(out, function, argument_names)
        self._write_poison_buffer_parameters(out, buffer_parameters, False)

    def _write_prologue(self, out: typing_util.Writable, header: bool) -> None:
        """Emit the base prologue, then open the guard and add #includes."""
        super()._write_prologue(out, header)
        out.write("""
#if {}

#include <psa/crypto.h>

#include <test/memory.h>
#include <test/psa_crypto_helpers.h>
#include <test/psa_test_wrappers.h>
"""
                  .format(self._CPP_GUARDS))

    def _write_epilogue(self, out: typing_util.Writable, header: bool) -> None:
        """Close the guard opened in _write_prologue, then emit the base epilogue."""
        out.write("""
#endif /* {} */
"""
                  .format(self._CPP_GUARDS))
        super()._write_epilogue(out, header)
+
+
class PSALoggingWrapperGenerator(PSAWrapperGenerator, c_wrapper_generator.Logging):
    """Generate a C source file containing wrapper functions that log PSA Crypto API calls."""

    def __init__(self, stream: str) -> None:
        # stream: destination handed to set_stream() — presumably a C
        # expression for the FILE* to log to (see c_wrapper_generator.Logging).
        super().__init__()
        self.set_stream(stream)

    # printf has no conversions for PSA-specific types, so map each one to a
    # standard integer type to cast to when logging.
    _PRINTF_TYPE_CAST = c_wrapper_generator.Logging._PRINTF_TYPE_CAST.copy()
    _PRINTF_TYPE_CAST.update({
        'mbedtls_svc_key_id_t': 'unsigned',
        'psa_algorithm_t': 'unsigned',
        'psa_drv_slot_number_t': 'unsigned long long',
        'psa_key_derivation_step_t': 'int',
        'psa_key_id_t': 'unsigned',
        'psa_key_slot_number_t': 'unsigned long long',
        'psa_key_lifetime_t': 'unsigned',
        'psa_key_type_t': 'unsigned',
        'psa_key_usage_flags_t': 'unsigned',
        'psa_pake_role_t': 'int',
        'psa_pake_step_t': 'int',
        'psa_status_t': 'int',
    })

    def _printf_parameters(self, typ: str, var: str) -> Tuple[str, List[str]]:
        # Returns a (format-string fragment, argument expressions) pair
        # describing how to log one parameter; ('', []) means "don't log it".
        if typ.startswith('const '):
            typ = typ[6:]
        if typ == 'uint8_t *':
            # Skip buffers
            return '', []
        if typ.endswith('operation_t *'):
            # Operation objects are opaque: nothing useful to print.
            return '', []
        if typ in self._PAKE_STUFF:
            # PAKE structures: not supported yet.
            return '', []
        if typ == 'psa_key_attributes_t *':
            # Expand key attribute structures into their individual fields.
            return (var + '={id=%u, lifetime=0x%08x, type=0x%08x, bits=%u, alg=%08x, usage=%08x}',
                    ['(unsigned) psa_get_key_{}({})'.format(field, var)
                     for field in ['id', 'lifetime', 'type', 'bits', 'algorithm', 'usage_flags']])
        return super()._printf_parameters(typ, var)
+
+
# Default output locations, relative to the mbedtls source tree root.
DEFAULT_C_OUTPUT_FILE_NAME = 'tests/src/psa_test_wrappers.c'
DEFAULT_H_OUTPUT_FILE_NAME = 'tests/include/test/psa_test_wrappers.h'
+
def main() -> None:
    """Command-line entry point: generate the PSA test wrapper .c/.h files."""
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--log',
                        help='Stream to log to (default: no logging code)')
    parser.add_argument('--output-c',
                        metavar='FILENAME',
                        default=DEFAULT_C_OUTPUT_FILE_NAME,
                        help=('Output .c file path (default: {}; skip .c output if empty)'
                              .format(DEFAULT_C_OUTPUT_FILE_NAME)))
    parser.add_argument('--output-h',
                        metavar='FILENAME',
                        default=DEFAULT_H_OUTPUT_FILE_NAME,
                        help=('Output .h file path (default: {}; skip .h output if empty)'
                              .format(DEFAULT_H_OUTPUT_FILE_NAME)))
    args = parser.parse_args()
    # Use the logging variant when a log stream was requested.
    generator = (PSALoggingWrapperGenerator(args.log) if args.log
                 else PSAWrapperGenerator()) #type: PSAWrapperGenerator
    generator.gather_data()
    # An empty path means "skip this output file".
    for path, write_file in ((args.output_h, generator.write_h_file),
                             (args.output_c, generator.write_c_file)):
        if path:
            write_file(path)
+
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
diff --git a/tests/scripts/generate_server9_bad_saltlen.py b/tests/scripts/generate_server9_bad_saltlen.py
new file mode 100755
index 0000000..9af4dd3
--- /dev/null
+++ b/tests/scripts/generate_server9_bad_saltlen.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+"""Generate server9-bad-saltlen.crt
+
+Generate a certificate signed with RSA-PSS, with an incorrect salt length.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import subprocess
+import argparse
+from asn1crypto import pem, x509, core #type: ignore #pylint: disable=import-error
+
# Shell command issuing a certificate from {csr}, signed RSA-PSS by the CA,
# announcing {anounce_saltlen} as the salt length in the signature parameters.
# Writes the DER-encoded certificate to stdout.
OPENSSL_RSA_PSS_CERT_COMMAND = r'''
openssl x509 -req -CA {ca_name}.crt -CAkey {ca_name}.key -set_serial 24 {ca_password} \
    {openssl_extfile} -days 3650 -outform DER -in {csr} \
    -sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{anounce_saltlen} \
    -sigopt rsa_mgf1_md:sha256
'''
# RSA-PSS signature options fragment.
# NOTE(review): SIG_OPT appears unused within this script — confirm before removing.
SIG_OPT = \
    r'-sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{saltlen} -sigopt rsa_mgf1_md:sha256'
# Shell command re-signing stdin with the CA key using {actual_saltlen};
# writes the raw signature to stdout.
OPENSSL_RSA_PSS_DGST_COMMAND = r'''openssl dgst -sign {ca_name}.key {ca_password} \
    -sigopt rsa_padding_mode:pss -sigopt rsa_pss_saltlen:{actual_saltlen} \
    -sigopt rsa_mgf1_md:sha256'''
+
+
def auto_int(x):
    """argparse `type` callback: parse an int in any base (0x.., 0o.., decimal)."""
    # Base 0 makes int() infer the base from the literal's prefix.
    return int(x, 0)
+
+
def build_argparser(parser):
    """Build argument parser"""
    parser.description = __doc__
    # Plain string options, registered in their original order.
    for flag, help_text in [
            ('--ca-name', 'Basename of CA files'),
            ('--ca-password', 'CA key file password'),
            ('--csr', 'CSR file for generating certificate'),
            ('--openssl-extfile', 'X905 v3 extension config file'),
    ]:
        parser.add_argument(flag, type=str, required=True, help=help_text)
    # Salt lengths accept decimal or prefixed (hex/octal/binary) integers.
    for flag, help_text in [
            ('--anounce_saltlen', 'Announced salt length'),
            ('--actual_saltlen', 'Actual salt length'),
    ]:
        parser.add_argument(flag, type=auto_int, required=True, help=help_text)
    parser.add_argument('--output', type=str, required=True)
+
+
def main():
    """Parse command-line arguments and generate the certificate."""
    parser = argparse.ArgumentParser()
    build_argparser(parser)
    options = parser.parse_args()
    return generate(**vars(options))
+
def generate(**kwargs):
    """Generate different salt length certificate file."""
    # Rewrite ca_password/openssl_extfile in place into ready-to-splice
    # command-line fragments (empty string when the value is absent).
    ca_password = kwargs.get('ca_password', '')
    if ca_password:
        kwargs['ca_password'] = r'-passin "pass:{ca_password}"'.format(
            **kwargs)
    else:
        kwargs['ca_password'] = ''
    extfile = kwargs.get('openssl_extfile', '')
    if extfile:
        kwargs['openssl_extfile'] = '-extfile {openssl_extfile}'.format(
            **kwargs)
    else:
        kwargs['openssl_extfile'] = ''

    # Issue the certificate with `anounce_saltlen` recorded in the signature
    # algorithm parameters; output is the DER-encoded certificate.
    cmd = OPENSSL_RSA_PSS_CERT_COMMAND.format(**kwargs)
    der_bytes = subprocess.check_output(cmd, shell=True)
    target_certificate = x509.Certificate.load(der_bytes)

    # Re-sign the TBS portion using `actual_saltlen`, producing a signature
    # whose real salt length differs from the announced one.
    cmd = OPENSSL_RSA_PSS_DGST_COMMAND.format(**kwargs)
    #pylint: disable=unexpected-keyword-arg
    der_bytes = subprocess.check_output(cmd,
                                        input=target_certificate['tbs_certificate'].dump(),
                                        shell=True)

    # Graft the mismatching signature onto the certificate and write it
    # out PEM-armored.
    with open(kwargs.get('output'), 'wb') as f:
        target_certificate['signature_value'] = core.OctetBitString(der_bytes)
        f.write(pem.armor('CERTIFICATE', target_certificate.dump()))
+
+
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
diff --git a/tests/scripts/generate_test_cert_macros.py b/tests/scripts/generate_test_cert_macros.py
new file mode 100755
index 0000000..a3bca7e
--- /dev/null
+++ b/tests/scripts/generate_test_cert_macros.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python3
+
+"""
+Generate `tests/src/test_certs.h` which includes certificates/keys/certificate list for testing.
+"""
+
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+
+import os
+import sys
+import argparse
+import jinja2
+
class MacroDefineAction(argparse.Action):
    """argparse action collecting (kind, macro_name, value) triples.

    All occurrences of --string/--binary/--password accumulate, in
    command-line order, into a single `namespace.values` list.
    """
    #pylint: disable=signature-differs, too-few-public-methods
    def __call__(self, parser, namespace, values, option_string):
        # First use: start the shared accumulator.
        if not hasattr(namespace, 'values'):
            namespace.values = []
        macro_name, filename = values
        # --string and --binary take file paths, which must exist.
        file_backed = self.dest in ('string', 'binary')
        if file_backed and not os.path.exists(filename):
            raise argparse.ArgumentError(
                None, '`{}`: Input file does not exist.'.format(filename))
        namespace.values.append((self.dest, macro_name, filename))
+
+
def macro_define_type(value):
    """argparse `type` callback: split 'MACRO=value' into [macro, value].

    Raises ArgumentTypeError if the input contains no '='.
    """
    parts = value.split('=', 1)
    if len(parts) == 2:
        return parts
    raise argparse.ArgumentTypeError(
        '`{}` is not MACRO=value format'.format(value))
+
+
def build_argparser(parser):
    """Register the command-line options on the given parser."""
    parser.description = __doc__
    # All three macro options share the same parsing and accumulation logic.
    shared = dict(type=macro_define_type, action=MacroDefineAction)
    parser.add_argument('--string', metavar='MACRO_NAME=path/to/file',
                        help='PEM to C string. ', **shared)
    parser.add_argument('--binary', metavar='MACRO_NAME=path/to/file',
                        help='DER to C arrary.', **shared)
    parser.add_argument('--password', metavar='MACRO_NAME=password',
                        help='Password to C string.', **shared)
    parser.add_argument('--output', type=str, required=True)
+
+
def main():
    """Parse arguments and write the generated header file."""
    parser = argparse.ArgumentParser()
    build_argparser(parser)
    options = parser.parse_args()
    return generate(**vars(options))
+
#pylint: disable=dangerous-default-value, unused-argument
def generate(values=[], output=None, **kwargs):
    """Generate C header file.

    values: list of (kind, macro_name, value) triples collected by
    MacroDefineAction; output: path of the header to write.
    (The default [] is never mutated here, so sharing it is harmless.)
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    # Templates live in tests/data_files, next to this script's directory.
    template_loader = jinja2.FileSystemLoader(
        searchpath=os.path.join(this_dir, '..', 'data_files'))
    template_env = jinja2.Environment(
        loader=template_loader, lstrip_blocks=True, trim_blocks=True)

    def read_as_c_array(filename):
        # Yield the file's bytes as lines of up to 12 comma-separated
        # 0x.. literals, suitable for a C array initializer.
        with open(filename, 'rb') as f:
            data = f.read(12)
            while data:
                yield ', '.join(['{:#04x}'.format(b) for b in data])
                data = f.read(12)

    def read_lines(filename):
        # Yield the file's lines stripped of surrounding whitespace.
        with open(filename) as f:
            try:
                for line in f:
                    yield line.strip()
            except:
                # Bare except is deliberate: name the offending file for
                # diagnosis, then re-raise the original exception unchanged.
                print(filename)
                raise

    def put_to_column(value, position=0):
        # Left-pad `value` with `position` spaces.
        return ' '*position + value

    template_env.filters['read_as_c_array'] = read_as_c_array
    template_env.filters['read_lines'] = read_lines
    template_env.filters['put_to_column'] = put_to_column

    template = template_env.get_template('test_certs.h.jinja2')

    with open(output, 'w') as f:
        f.write(template.render(macros=values))
+
+
if __name__ == '__main__':
    # Propagate main()'s return value (None on success) as the exit status.
    sys.exit(main())
diff --git a/tests/scripts/generate_test_code.py b/tests/scripts/generate_test_code.py
new file mode 100755
index 0000000..5f711bf
--- /dev/null
+++ b/tests/scripts/generate_test_code.py
@@ -0,0 +1,1277 @@
+#!/usr/bin/env python3
+# Test suites code generator.
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script is a key part of Mbed TLS test suites framework. For
+understanding the script it is important to understand the
+framework. This doc string contains a summary of the framework
+and explains the function of this script.
+
+Mbed TLS test suites:
+=====================
+Scope:
+------
+The test suites focus on unit testing the crypto primitives and also
+include x509 parser tests. Tests can be added to test any Mbed TLS
+module. However, the framework is not capable of testing SSL
+protocol, since that requires full stack execution and that is best
+tested as part of the system test.
+
+Test case definition:
+---------------------
+Tests are defined in a test_suite_<module>[.<optional sub module>].data
+file. A test definition contains:
+ test name
+ optional build macro dependencies
+ test function
+ test parameters
+
+Test dependencies are build macros that can be specified to indicate
+the build config in which the test is valid. For example if a test
+depends on a feature that is only enabled by defining a macro. Then
+that macro should be specified as a dependency of the test.
+
+Test function is the function that implements the test steps. This
+function is specified for different tests that perform same steps
+with different parameters.
+
+Test parameters are specified in string form separated by ':'.
+Parameters can be of type string, binary data specified as hex
+string and integer constants specified as integer, macro or
+as an expression. Following is an example test definition:
+
+ AES 128 GCM Encrypt and decrypt 8 bytes
+ depends_on:MBEDTLS_AES_C:MBEDTLS_GCM_C
+ enc_dec_buf:MBEDTLS_CIPHER_AES_128_GCM:"AES-128-GCM":128:8:-1
+
+Test functions:
+---------------
+Test functions are coded in C in test_suite_<module>.function files.
+Functions file is itself not compilable and contains special
+format patterns to specify test suite dependencies, start and end
+of functions and function dependencies. Check any existing functions
+file for example.
+
+Execution:
+----------
+Tests are executed in 3 steps:
+- Generating test_suite_<module>[.<optional sub module>].c file
+ for each corresponding .data file.
+- Building each source file into executables.
+- Running each executable and printing report.
+
+Generating C test source requires more than just the test functions.
+Following extras are required:
+- Process main()
+- Reading .data file and dispatching test cases.
+- Platform specific test case execution
+- Dependency checking
+- Integer expression evaluation
+- Test function dispatch
+
+Build dependencies and integer expressions (in the test parameters)
+are specified as strings in the .data file. Their run time value is
+not known at the generation stage. Hence, they need to be translated
+into run time evaluations. This script generates the run time checks
+for dependencies and integer expressions.
+
+Similarly, function names have to be translated into function calls.
+This script also generates code for function dispatch.
+
+The extra code mentioned here is either generated by this script
+or it comes from the input files: helpers file, platform file and
+the template file.
+
+Helper file:
+------------
+Helpers file contains common helper/utility functions and data.
+
+Platform file:
+--------------
+Platform file contains platform specific setup code and test case
+dispatch code. For example, host_test.function reads test data
+file from host's file system and dispatches tests.
+
+Template file:
+--------------
+Template file for example main_test.function is a template C file in
+which generated code and code from input files is substituted to
+generate a compilable C file. It also contains skeleton functions for
+dependency checks, expression evaluation and function dispatch. These
+functions are populated with checks and return codes by this script.
+
+Template file contains "replacement" fields that are formatted
+strings processed by Python string.Template.substitute() method.
+
+This script:
+============
+Core function of this script is to fill the template file with
+code that is generated or read from helpers and platform files.
+
+This script replaces following fields in the template and generates
+the test source file:
+
+__MBEDTLS_TEST_TEMPLATE__TEST_COMMON_HELPERS
+ All common code from helpers.function
+ is substituted here.
+__MBEDTLS_TEST_TEMPLATE__FUNCTIONS_CODE
+ Test functions are substituted here
+                from the input test_suite_xyz.function
+ file. C preprocessor checks are generated
+ for the build dependencies specified
+ in the input file. This script also
+ generates wrappers for the test
+ functions with code to expand the
+ string parameters read from the data
+ file.
+__MBEDTLS_TEST_TEMPLATE__EXPRESSION_CODE
+ This script enumerates the
+ expressions in the .data file and
+ generates code to handle enumerated
+ expression Ids and return the values.
+__MBEDTLS_TEST_TEMPLATE__DEP_CHECK_CODE
+ This script enumerates all
+ build dependencies and generate
+ code to handle enumerated build
+ dependency Id and return status: if
+ the dependency is defined or not.
+__MBEDTLS_TEST_TEMPLATE__DISPATCH_CODE
+ This script enumerates the functions
+ specified in the input test data file
+ and generates the initializer for the
+ function table in the template
+ file.
+__MBEDTLS_TEST_TEMPLATE__PLATFORM_CODE
+ Platform specific setup and test
+ dispatch code.
+
+"""
+
+
+import os
+import re
+import sys
+import string
+import argparse
+
+
# Types recognized as signed integer arguments in test functions.
SIGNED_INTEGER_TYPES = frozenset([
    'char',
    'short',
    'short int',
    'int',
    'int8_t',
    'int16_t',
    'int32_t',
    'int64_t',
    'intmax_t',
    'long',
    'long int',
    'long long int',
    'mbedtls_mpi_sint',
    'psa_status_t',
])
# Types recognized as string arguments in test functions.
STRING_TYPES = frozenset(['char*', 'const char*', 'char const*'])
# Types recognized as hex data arguments in test functions.
DATA_TYPES = frozenset(['data_t*', 'const data_t*', 'data_t const*'])

# Comment markers delimiting the shared header section of a .function file.
BEGIN_HEADER_REGEX = r'/\*\s*BEGIN_HEADER\s*\*/'
END_HEADER_REGEX = r'/\*\s*END_HEADER\s*\*/'

# Comment markers delimiting suite-specific helper code.
BEGIN_SUITE_HELPERS_REGEX = r'/\*\s*BEGIN_SUITE_HELPERS\s*\*/'
END_SUITE_HELPERS_REGEX = r'/\*\s*END_SUITE_HELPERS\s*\*/'

# Markers delimiting the suite-wide dependency declarations.
BEGIN_DEP_REGEX = r'BEGIN_DEPENDENCIES'
END_DEP_REGEX = r'END_DEPENDENCIES'

# Markers delimiting one test case function; BEGIN_CASE may carry
# a depends_on:... clause captured by the named group.
BEGIN_CASE_REGEX = r'/\*\s*BEGIN_CASE\s*(?P<depends_on>.*?)\s*\*/'
END_CASE_REGEX = r'/\*\s*END_CASE\s*\*/'

DEPENDENCY_REGEX = r'depends_on:(?P<dependencies>.*)'
# Optionally negated C identifier (matched case-insensitively by callers).
C_IDENTIFIER_REGEX = r'!?[a-z_][a-z0-9_]*'
CONDITION_OPERATOR_REGEX = r'[!=]=|[<>]=?'
# forbid 0ddd which might be accidentally octal or accidentally decimal
CONDITION_VALUE_REGEX = r'[-+]?(0x[0-9a-f]+|0|[1-9][0-9]*)'
# A dependency: MACRO optionally followed by <op> <value>.
CONDITION_REGEX = r'({})(?:\s*({})\s*({}))?$'.format(C_IDENTIFIER_REGEX,
                                                     CONDITION_OPERATOR_REGEX,
                                                     CONDITION_VALUE_REGEX)
# Extracts the test function's name from its definition line.
TEST_FUNCTION_VALIDATION_REGEX = r'\s*void\s+(?P<func_name>\w+)\s*\('
FUNCTION_ARG_LIST_END_REGEX = r'.*\)'
EXIT_LABEL_REGEX = r'^exit:'
+
+
class GeneratorInputError(Exception):
    """Error in this script's input files.

    Covers missing patterns, test function names and other parsing
    errors encountered while reading .function/.data files.
    """
+
+
class FileWrapper:
    """Iterate over a file's lines while tracking the current line number.

    Behaves like iterating the underlying file, but exposes a line_no
    property giving the 1-based number of the last line read (0 before
    any read), and normalizes each line to end with exactly one newline.
    """

    def __init__(self, file_name) -> None:
        """Open file_name for reading and reset the line counter.

        :param file_name: File path to open.
        """
        # Binary mode: decoding happens per line in __next__.
        self._stream = open(file_name, 'rb')
        self._line_no = 0

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next line and advance the line counter.

        :return: Line read from file.
        """
        raw_line = next(self._stream)
        self._line_no += 1
        # Decode with the default encoding, strip trailing whitespace
        # left by decoding, and re-append a single newline.
        return raw_line.decode(sys.getdefaultencoding()).rstrip() + '\n'

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate to the underlying file object, which closes it.
        self._stream.__exit__(exc_type, exc_val, exc_tb)

    @property
    def line_no(self):
        """Number of the most recently read line (0 before any read)."""
        return self._line_no

    @property
    def name(self):
        """Name of the underlying file."""
        return self._stream.name
+
+
def split_dep(dep):
    """
    Split NOT character '!' from dependency. Used by gen_dependencies()

    :param dep: Dependency macro, optionally prefixed with '!'.
    :return: string tuple. Ex: ('!', MACRO) for !MACRO and ('', MACRO) for
             MACRO.
    """
    # startswith() also handles the empty string, where indexing dep[0]
    # would raise IndexError.
    return ('!', dep[1:]) if dep.startswith('!') else ('', dep)
+
+
def gen_dependencies(dependencies):
    """
    Generate C preprocessor guards for a compile-time dependency list.

    A dependency may carry a leading '!' to negate the check; split_dep()
    separates it so the proper '#if [!]defined' form is produced. Callers
    wrap dependent code between the two returned fragments.

    :param dependencies: List of dependencies.
    :return: (start, end): the '#if' lines and the matching '#endif' lines
             with macro annotations for readability.
    """
    start_lines = []
    for dependency in dependencies:
        negation, macro = split_dep(dependency)
        start_lines.append('#if %sdefined(%s)\n' % (negation, macro))
    # Close in reverse order so the #endif comments mirror the #if nesting.
    end_lines = ['#endif /* %s */\n' % dependency
                 for dependency in reversed(dependencies)]
    return ''.join(start_lines), ''.join(end_lines)
+
+
def gen_dependencies_one_line(dependencies):
    """
    Like gen_dependencies(), but emits the whole check on one line.
    Useful for generating code with an #else block.

    :param dependencies: List of dependencies.
    :return: Preprocessor check code ('' when there are no dependencies).
    """
    if not dependencies:
        return ''
    checks = ['%sdefined(%s)' % split_dep(dependency)
              for dependency in dependencies]
    return '#if ' + ' && '.join(checks)
+
+
def gen_function_wrapper(name, local_vars, args_dispatch):
    """
    Create a test function wrapper that unpacks parameters from the
    params[] array and invokes the test function.

    :param name: Test function name
    :param local_vars: Local variables declaration code
    :param args_dispatch: List of dispatch arguments.
           Ex: ['(char *) params[0]', '*((int *) params[1])']
    :return: Test function wrapper.
    """
    # Silence the unused-parameter warning when nothing is read from params[].
    void_cast = '' if args_dispatch else '    (void)params;\n'
    call_args = ', '.join(args_dispatch)
    return '''
void {name}_wrapper( void ** params )
{{
{unused_params}{locals}
    {name}( {args} );
}}
'''.format(name=name,
           unused_params=void_cast,
           args=call_args,
           locals=local_vars)
+
+
def gen_dispatch(name, dependencies):
    """
    Generate an initializer entry for the test function table defined in
    main_test.function. When the function has dependencies, the entry is
    guarded so that NULL is assigned when they are not met at compile time.

    :param name: Test function name
    :param dependencies: List of dependencies
    :return: Dispatch code.
    """
    # Guard clause: the dependency-free case needs no preprocessor check.
    if not dependencies:
        return '''
    {name}_wrapper,
'''.format(name=name)
    return '''
{preprocessor_check}
    {name}_wrapper,
#else
    NULL,
#endif
'''.format(preprocessor_check=gen_dependencies_one_line(dependencies),
           name=name)
+
+
def parse_until_pattern(funcs_f, end_regex):
    """
    Read lines from the file object until end_regex matches one of them.

    The returned text begins with a C '#line' directive so that compiler
    diagnostics point back into the original .function file.

    :param funcs_f: file object for .function file
    :param end_regex: Pattern to stop parsing
    :return: Lines read before the end pattern
    """
    captured = ['#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)]
    for line in funcs_f:
        if re.search(end_regex, line):
            break
        captured.append(line)
    else:
        # for/else: the input ran out without ever matching end_regex.
        raise GeneratorInputError("file: %s - end pattern [%s] not found!" %
                                  (funcs_f.name, end_regex))

    return ''.join(captured)
+
+
def validate_dependency(dependency):
    """
    Validate a C macro condition, raising GeneratorInputError if invalid.

    :param dependency: Input macro dependency
    :return: input dependency stripped of leading & trailing white spaces.
    """
    stripped = dependency.strip()
    # Case-insensitive match against the MACRO[<op><value>] grammar.
    if re.match(CONDITION_REGEX, stripped, re.I):
        return stripped
    raise GeneratorInputError('Invalid dependency %s' % stripped)
+
+
def parse_dependencies(inp_str):
    """
    Split inp_str on ':' and validate each resulting macro.

    :param inp_str: Input string with macros delimited by ':'.
    :return: list of dependencies
    """
    return [validate_dependency(field) for field in inp_str.split(':')]
+
+
def parse_suite_dependencies(funcs_f):
    """
    Parses test suite dependencies specified at the top of a
    .function file, that starts with pattern BEGIN_DEPENDENCIES
    and end with END_DEPENDENCIES. Dependencies are specified
    after pattern 'depends_on:' and are delimited by ':'.

    :param funcs_f: file object for .function file
    :return: List of test suite dependencies.
    """
    dependencies = []
    for line in funcs_f:
        match = re.search(DEPENDENCY_REGEX, line.strip())
        if match:
            try:
                # A later depends_on line replaces any earlier one.
                dependencies = parse_dependencies(match.group('dependencies'))
            except GeneratorInputError as error:
                # Re-raise with the file position appended for diagnosis.
                raise GeneratorInputError(
                    str(error) + " - %s:%d" % (funcs_f.name, funcs_f.line_no))
        if re.search(END_DEP_REGEX, line):
            break
    else:
        # for/else: the input ran out without hitting END_DEPENDENCIES.
        raise GeneratorInputError("file: %s - end dependency pattern [%s]"
                                  " not found!" % (funcs_f.name,
                                                   END_DEP_REGEX))

    return dependencies
+
+
def parse_function_dependencies(line):
    """
    Parse the dependencies that share a line with a BEGIN_CASE comment.
    Dependencies follow the pattern 'depends_on:' and are delimited by ':'.

    :param line: Line from .function file that has dependencies.
    :return: List of dependencies.
    """
    case_match = re.search(BEGIN_CASE_REGEX, line)
    dep_str = case_match.group('depends_on')
    if not dep_str:
        return []
    dep_match = re.search(DEPENDENCY_REGEX, dep_str)
    if not dep_match:
        return []
    return parse_dependencies(dep_match.group('dependencies'))
+
+
# Matches "<type> [const] <name>"; group 1 is the type, group 2 the name.
ARGUMENT_DECLARATION_REGEX = re.compile(r'(.+?) ?(?:\bconst\b)? ?(\w+)\Z', re.S)
def parse_function_argument(arg, arg_idx, args, local_vars, args_dispatch):
    """
    Parses one test function's argument declaration.

    Appends to the three accumulator lists in place.

    :param arg: argument declaration.
    :param arg_idx: current wrapper argument index.
    :param args: accumulator of arguments' internal types.
    :param local_vars: accumulator of internal variable declarations.
    :param args_dispatch: accumulator of argument usage expressions.
    :return: the number of new wrapper arguments,
             or None if the argument declaration is invalid.
    """
    # Normalize whitespace
    arg = arg.strip()
    arg = re.sub(r'\s*\*\s*', r'*', arg)
    arg = re.sub(r'\s+', r' ', arg)
    # Extract name and type
    m = ARGUMENT_DECLARATION_REGEX.search(arg)
    if not m:
        # E.g. "int x[42]"
        return None
    typ, _ = m.groups()
    if typ in SIGNED_INTEGER_TYPES:
        args.append('int')
        # Integers travel through the sint member of the argument union.
        args_dispatch.append('((mbedtls_test_argument_t *) params[%d])->sint' % arg_idx)
        return 1
    if typ in STRING_TYPES:
        args.append('char*')
        args_dispatch.append('(char *) params[%d]' % arg_idx)
        return 1
    if typ in DATA_TYPES:
        args.append('hex')
        # create a structure
        # Hex data consumes two wrapper slots: byte pointer, then length.
        pointer_initializer = '(uint8_t *) params[%d]' % arg_idx
        len_initializer = '((mbedtls_test_argument_t *) params[%d])->len' % (arg_idx+1)
        local_vars.append('    data_t data%d = {%s, %s};\n' %
                          (arg_idx, pointer_initializer, len_initializer))
        args_dispatch.append('&data%d' % arg_idx)
        return 2
    return None
+
# Everything between the first '(' and the next ')'.
ARGUMENT_LIST_REGEX = re.compile(r'\((.*?)\)', re.S)
def parse_function_arguments(line):
    """
    Parses test function signature for validation and generates
    a dispatch wrapper function that translates input test vectors
    read from the data file into test function arguments.

    :param line: Line from .function file that has a function
                 signature.
    :return: argument list, local variables for
             wrapper function and argument dispatch code.
    """
    # Process arguments, ex: <type> arg1, <type> arg2 )
    # This script assumes that the argument list is terminated by ')'
    # i.e. the test functions will not have a function pointer
    # argument.
    m = ARGUMENT_LIST_REGEX.search(line)
    arg_list = m.group(1).strip()
    if arg_list in ['', 'void']:
        return [], '', []
    args = []
    local_vars = []
    args_dispatch = []
    arg_idx = 0
    for arg in arg_list.split(','):
        # Each declaration consumes one wrapper slot (int/string) or two
        # (hex data: pointer + length); None marks an unsupported type.
        indexes = parse_function_argument(arg, arg_idx,
                                          args, local_vars, args_dispatch)
        if indexes is None:
            raise ValueError("Test function arguments can only be 'int', "
                             "'char *' or 'data_t'\n%s" % line)
        arg_idx += indexes

    return args, ''.join(local_vars), args_dispatch
+
+
def generate_function_code(name, code, local_vars, args_dispatch,
                           dependencies):
    """
    Generate function code with preprocessor checks and parameter dispatch
    wrapper.

    :param name: Function name
    :param code: Function code
    :param local_vars: Local variables for function wrapper
    :param args_dispatch: Argument dispatch code
    :param dependencies: Preprocessor dependencies list
    :return: Final function code
    """
    # Add exit label if not present. It is inserted just before the
    # function's closing brace so that 'goto exit' in generated checks
    # always has a target.
    if code.find('exit:') == -1:
        split_code = code.rsplit('}', 1)
        if len(split_code) == 2:
            code = """exit:
    ;
}""".join(split_code)

    code += gen_function_wrapper(name, local_vars, args_dispatch)
    # Wrap the whole function in the #if/#endif guards for its dependencies.
    preprocessor_check_start, preprocessor_check_end = \
        gen_dependencies(dependencies)
    return preprocessor_check_start + code + preprocessor_check_end
+
COMMENT_START_REGEX = re.compile(r'/[*/]')

def skip_comments(line, stream):
    """Remove comments in line.

    If the line contains an unfinished comment, read more lines from stream
    until the line that contains the comment.

    :param line: current source line.
    :param stream: iterable yielding the following source lines.
    :return: The original line with inner comments replaced by spaces.
             Trailing comments and whitespace may be removed completely.
    """
    pos = 0
    while True:
        opening = COMMENT_START_REGEX.search(line, pos)
        if not opening:
            break
        if line[opening.start(0) + 1] == '/': # //...
            continuation = line
            # Count the number of line breaks, to keep line numbers aligned
            # in the output.
            line_count = 1
            while continuation.endswith('\\\n'):
                # This errors out if the file ends with an unfinished line
                # comment. That's acceptable to not complicate the code further.
                continuation = next(stream)
                line_count += 1
            # Drop the // comment entirely, keeping one '\n' per consumed line.
            return line[:opening.start(0)].rstrip() + '\n' * line_count
        # Parsing /*...*/, looking for the end
        closing = line.find('*/', opening.end(0))
        while closing == -1:
            # This errors out if the file ends with an unfinished block
            # comment. That's acceptable to not complicate the code further.
            line += next(stream)
            closing = line.find('*/', opening.end(0))
        pos = closing + 2
        # Replace inner comment by spaces. There needs to be at least one space
        # for things like 'int/*ihatespaces*/foo'. Go further and preserve the
        # width of the comment and line breaks, this way positions in error
        # messages remain correct.
        line = (line[:opening.start(0)] +
                re.sub(r'.', r' ', line[opening.start(0):pos]) +
                line[pos:])
    # Strip whitespace at the end of lines (it's irrelevant to error messages).
    return re.sub(r' +(\n|\Z)', r'\1', line)
+
def parse_function_code(funcs_f, dependencies, suite_dependencies):
    """
    Parses out a function from function file object and generates
    function and dispatch code.

    :param funcs_f: file object of the functions file.
    :param dependencies: List of dependencies
    :param suite_dependencies: List of test suite dependencies
    :return: Function name, arguments, function code and dispatch code.
    :raises GeneratorInputError: if no function signature or no
             END_CASE marker is found.
    """
    # Emit a #line directive so compiler errors point at the .function file.
    line_directive = '#line %d "%s"\n' % (funcs_f.line_no + 1, funcs_f.name)
    code = ''
    has_exit_label = False
    for line in funcs_f:
        # Check function signature. Function signature may be split
        # across multiple lines. Here we try to find the start of
        # arguments list, then remove '\n's and apply the regex to
        # detect function start.
        line = skip_comments(line, funcs_f)
        up_to_arg_list_start = code + line[:line.find('(') + 1]
        match = re.match(TEST_FUNCTION_VALIDATION_REGEX,
                         up_to_arg_list_start.replace('\n', ' '), re.I)
        if match:
            # check if we have full signature i.e. split in more lines
            name = match.group('func_name')
            if not re.match(FUNCTION_ARG_LIST_END_REGEX, line):
                # Keep accumulating lines until the closing ')' shows up.
                for lin in funcs_f:
                    line += skip_comments(lin, funcs_f)
                    if re.search(FUNCTION_ARG_LIST_END_REGEX, line):
                        break
            args, local_vars, args_dispatch = parse_function_arguments(
                line)
            code += line
            break
        code += line
    else:
        # for/else: the loop exhausted the file without finding a signature.
        raise GeneratorInputError("file: %s - Test functions not found!" %
                                  funcs_f.name)

    # Prefix test function name with 'test_'
    code = code.replace(name, 'test_' + name, 1)
    name = 'test_' + name

    # If a test function has no arguments then add 'void' argument to
    # avoid "-Wstrict-prototypes" warnings from clang
    if len(args) == 0:
        code = code.replace('()', '(void)', 1)

    # Consume the function body up to the END_CASE marker.
    for line in funcs_f:
        if re.search(END_CASE_REGEX, line):
            break
        if not has_exit_label:
            has_exit_label = \
                re.search(EXIT_LABEL_REGEX, line.strip()) is not None
        code += line
    else:
        raise GeneratorInputError("file: %s - end case pattern [%s] not "
                                  "found!" % (funcs_f.name, END_CASE_REGEX))

    code = line_directive + code
    code = generate_function_code(name, code, local_vars, args_dispatch,
                                  dependencies)
    dispatch_code = gen_dispatch(name, suite_dependencies + dependencies)
    return (name, args, code, dispatch_code)
+
+
def parse_functions(funcs_f):
    """
    Parses a test_suite_xxx.function file and returns information
    for generating a C source file for the test suite.

    :param funcs_f: file object of the functions file.
    :return: List of test suite dependencies, test function dispatch
             code, function code and a dict with function identifiers
             and arguments info.
    """
    suite_helpers = ''
    suite_dependencies = []
    suite_functions = ''
    func_info = {}              # maps function name -> (id, args)
    function_idx = 0            # sequential function id
    dispatch_code = ''
    for line in funcs_f:
        if re.search(BEGIN_HEADER_REGEX, line):
            suite_helpers += parse_until_pattern(funcs_f, END_HEADER_REGEX)
        elif re.search(BEGIN_SUITE_HELPERS_REGEX, line):
            suite_helpers += parse_until_pattern(funcs_f,
                                                 END_SUITE_HELPERS_REGEX)
        elif re.search(BEGIN_DEP_REGEX, line):
            suite_dependencies += parse_suite_dependencies(funcs_f)
        elif re.search(BEGIN_CASE_REGEX, line):
            # Re-raise with file/line context for a better error message.
            try:
                dependencies = parse_function_dependencies(line)
            except GeneratorInputError as error:
                raise GeneratorInputError(
                    "%s:%d: %s" % (funcs_f.name, funcs_f.line_no,
                                   str(error)))
            func_name, args, func_code, func_dispatch =\
                parse_function_code(funcs_f, dependencies, suite_dependencies)
            suite_functions += func_code
            # Generate dispatch code and enumeration info
            if func_name in func_info:
                raise GeneratorInputError(
                    "file: %s - function %s re-declared at line %d" %
                    (funcs_f.name, func_name, funcs_f.line_no))
            func_info[func_name] = (function_idx, args)
            dispatch_code += '/* Function Id: %d */\n' % function_idx
            dispatch_code += func_dispatch
            function_idx += 1

    # Wrap helpers + functions in the suite-level #if/#endif guards.
    func_code = (suite_helpers +
                 suite_functions).join(gen_dependencies(suite_dependencies))
    return suite_dependencies, dispatch_code, func_code, func_info
+
+
def escaped_split(inp_str, split_char):
    """
    Split inp_str on character split_char, except where it is preceded
    by a backslash (escaped). Since the return value is used to write
    back to the intermediate data file, any escape characters in the
    input are retained in the output. Empty fields are dropped.

    :param inp_str: String to split
    :param split_char: Split character (a single character)
    :return: List of splits
    :raises ValueError: if split_char is longer than one character.
    """
    if len(split_char) > 1:
        raise ValueError('Expected split character. Found string!')
    # Consume escaped pairs ("\x") unchanged; replace each unescaped
    # split character with a newline sentinel, then split on it.
    # re.escape() guards against split characters that happen to be
    # regex metacharacters (e.g. '.', '|').
    out = re.sub(r'(\\.)|' + re.escape(split_char),
                 lambda m: m.group(1) or '\n', inp_str,
                 len(inp_str)).split('\n')
    out = [x for x in out if x]
    return out
+
+
def parse_test_data(data_f):
    """
    Parses .data file for each test case name, test function name,
    test dependencies and test arguments. This information is
    correlated with the test functions file for generating an
    intermediate data file replacing the strings for test function
    names, dependencies and integer constant expressions with
    identifiers. Mainly for optimising space for on-target
    execution.

    :param data_f: file object of the data file.
    :return: Generator that yields line number, test name, function name,
             dependency list and function argument list.
    :raises GeneratorInputError: if a test name is not followed by a
             test function line before a blank line or end of file.
    """
    # Two-state machine: expect a test name, then expect its
    # dependencies/arguments line.
    __state_read_name = 0
    __state_read_args = 1
    state = __state_read_name
    dependencies = []
    name = ''
    for line in data_f:
        line = line.strip()
        # Skip comments
        if line.startswith('#'):
            continue

        # Blank line indicates end of test
        if not line:
            if state == __state_read_args:
                raise GeneratorInputError("[%s:%d] Newline before arguments. "
                                          "Test function and arguments "
                                          "missing for %s" %
                                          (data_f.name, data_f.line_no, name))
            continue

        if state == __state_read_name:
            # Read test name
            name = line
            state = __state_read_args
        elif state == __state_read_args:
            # Check dependencies
            match = re.search(DEPENDENCY_REGEX, line)
            if match:
                try:
                    dependencies = parse_dependencies(
                        match.group('dependencies'))
                except GeneratorInputError as error:
                    raise GeneratorInputError(
                        str(error) + " - %s:%d" %
                        (data_f.name, data_f.line_no))
            else:
                # Read test vectors
                parts = escaped_split(line, ':')
                test_function = parts[0]
                args = parts[1:]
                yield data_f.line_no, name, test_function, dependencies, args
                dependencies = []
                state = __state_read_name
    # A trailing test name without arguments is an error too.
    if state == __state_read_args:
        raise GeneratorInputError("[%s:%d] Newline before arguments. "
                                  "Test function and arguments missing for "
                                  "%s" % (data_f.name, data_f.line_no, name))
+
+
def gen_dep_check(dep_id, dep):
    """
    Generate code for checking dependency with the associated
    identifier.

    :param dep_id: Dependency identifier
    :param dep: Dependency macro
    :return: Dependency check code
    :raises GeneratorInputError: on a negative id, an empty dependency
             or one that does not match CONDITION_REGEX.
    """
    if dep_id < 0:
        raise GeneratorInputError("Dependency Id should be a positive "
                                  "integer.")
    # A leading '!' negates the dependency.
    _not, dep = ('!', dep[1:]) if dep[0] == '!' else ('', dep)
    if not dep:
        raise GeneratorInputError("Dependency should not be an empty string.")

    dependency = re.match(CONDITION_REGEX, dep, re.I)
    if not dependency:
        raise GeneratorInputError('Invalid dependency %s' % dep)

    # With a comparison operator (group 2) we test the expression
    # directly; without one we test defined(MACRO).
    _defined = '' if dependency.group(2) else 'defined'
    _cond = dependency.group(2) if dependency.group(2) else ''
    _value = dependency.group(3) if dependency.group(3) else ''

    dep_check = '''
        case {id}:
            {{
#if {_not}{_defined}({macro}{_cond}{_value})
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }}
            break;'''.format(_not=_not, _defined=_defined,
                             macro=dependency.group(1), id=dep_id,
                             _cond=_cond, _value=_value)
    return dep_check
+
+
def gen_expression_check(exp_id, exp):
    """
    Generate a C switch case that evaluates the integer expression
    identified by exp_id.

    :param exp_id: Expression Identifier
    :param exp: Expression/Macro
    :return: Expression check code
    :raises GeneratorInputError: on a negative id or empty expression.
    """
    if exp_id < 0:
        raise GeneratorInputError("Expression Id should be a positive "
                                  "integer.")
    if not exp:
        raise GeneratorInputError("Expression should not be an empty string.")
    case_template = '''
        case {exp_id}:
            {{
                *out_value = {expression};
            }}
            break;'''
    return case_template.format(exp_id=exp_id, expression=exp)
+
+
def write_dependencies(out_data_f, test_dependencies, unique_dependencies):
    """
    Write dependencies to intermediate test data file, replacing
    the string form with identifiers. Also, generates dependency
    check code for dependencies seen for the first time.

    :param out_data_f: Output intermediate data file
    :param test_dependencies: Dependencies
    :param unique_dependencies: Mutable list to track unique dependencies
           that are global to this re-entrant function.
    :return: returns dependency check code.
    """
    dep_check_code = ''
    if not test_dependencies:
        # No dependency line is written for tests without dependencies.
        return dep_check_code
    out_data_f.write('depends_on')
    for dep in test_dependencies:
        if dep in unique_dependencies:
            # Already registered: reuse its identifier.
            dep_id = unique_dependencies.index(dep)
        else:
            # First occurrence: register and emit its check code.
            unique_dependencies.append(dep)
            dep_id = len(unique_dependencies) - 1
            dep_check_code += gen_dep_check(dep_id, dep)
        out_data_f.write(':' + str(dep_id))
    out_data_f.write('\n')
    return dep_check_code
+
+
# Decimal or hexadecimal integer literal, with optional leading minus.
INT_VAL_REGEX = re.compile(r'-?(\d+|0x[0-9a-f]+)$', re.I)
def val_is_int(val: str) -> bool:
    """Whether val is suitable as an 'int' parameter in the .datax file:
    a plain decimal/hex literal within the range strtol() is guaranteed
    to handle."""
    if INT_VAL_REGEX.match(val):
        # Limit the range to what is guaranteed to get through strtol()
        return abs(int(val, 0)) <= 0x7fffffff
    return False
+
def write_parameters(out_data_f, test_args, func_args, unique_expressions):
    """
    Writes test parameters to the intermediate data file, replacing
    the string form with identifiers. Also, generates expression
    check code.

    :param out_data_f: Output intermediate data file
    :param test_args: Test parameters
    :param func_args: Function arguments
    :param unique_expressions: Mutable list to track unique
           expressions that are global to this re-entrant function.
    :return: Returns expression check code.
    """
    expression_code = ''
    for i, _ in enumerate(test_args):
        typ = func_args[i]
        val = test_args[i]

        # Pass small integer constants literally. This reduces the size of
        # the C code. Register anything else as an expression.
        if typ == 'int' and not val_is_int(val):
            typ = 'exp'
            if val not in unique_expressions:
                unique_expressions.append(val)
                # exp_id can be derived from len(). But for
                # readability and consistency with the already-registered
                # case below, let's use index().
                exp_id = unique_expressions.index(val)
                expression_code += gen_expression_check(exp_id, val)
                val = exp_id
            else:
                # Already registered: reuse its identifier.
                val = unique_expressions.index(val)
        out_data_f.write(':' + typ + ':' + str(val))
    out_data_f.write('\n')
    return expression_code
+
+
def gen_suite_dep_checks(suite_dependencies, dep_check_code, expression_code):
    """
    Generates preprocessor checks for test suite dependencies.

    :param suite_dependencies: Test suite dependencies read from the
            .function file.
    :param dep_check_code: Dependency check code
    :param expression_code: Expression check code
    :return: Dependency and expression code guarded by test suite
             dependencies (returned unchanged when the suite has no
             dependencies).
    """
    if not suite_dependencies:
        return dep_check_code, expression_code
    preprocessor_check = gen_dependencies_one_line(suite_dependencies)
    # Both snippets get the same #if ... #endif guard.
    guard_template = '\n{preprocessor_check}\n{code}\n#endif\n'
    guarded_dep_check = guard_template.format(
        preprocessor_check=preprocessor_check, code=dep_check_code)
    guarded_expression = guard_template.format(
        preprocessor_check=preprocessor_check, code=expression_code)
    return guarded_dep_check, guarded_expression
+
+
def get_function_info(func_info, function_name, line_no):
    """Look up information about a test function by name.

    Raise an informative exception if function_name is not found.

    :param func_info: dictionary mapping function names to their information.
    :param function_name: the function name as written in the .function and
                          .data files (without the 'test_' prefix).
    :param line_no: line number for error messages.
    :return Function information (id, args).
    """
    # Generated functions are registered under a 'test_' prefix.
    lookup_name = 'test_' + function_name
    if lookup_name in func_info:
        return func_info[lookup_name]
    raise GeneratorInputError("%d: Function %s not found!" %
                              (line_no, lookup_name))
+
+
def gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies):
    """
    This function reads test case name, dependencies and test vectors
    from the .data file. This information is correlated with the test
    functions file for generating an intermediate data file replacing
    the strings for test function names, dependencies and integer
    constant expressions with identifiers. Mainly for optimising
    space for on-target execution.
    It also generates test case dependency check code and expression
    evaluation code.

    :param data_f: Data file object
    :param out_data_f: Output intermediate data file
    :param func_info: Dict keyed by function and with function id
           and arguments info
    :param suite_dependencies: Test suite dependencies
    :return: Returns dependency and expression check code
    :raises GeneratorInputError: on unknown functions or argument
             count mismatches.
    """
    # Identifier tables shared across all test cases in this file.
    unique_dependencies = []
    unique_expressions = []
    dep_check_code = ''
    expression_code = ''
    for line_no, test_name, function_name, test_dependencies, test_args in \
            parse_test_data(data_f):
        out_data_f.write(test_name + '\n')

        # Write dependencies
        dep_check_code += write_dependencies(out_data_f, test_dependencies,
                                             unique_dependencies)

        # Write test function name
        func_id, func_args = \
            get_function_info(func_info, function_name, line_no)
        out_data_f.write(str(func_id))

        # Write parameters
        if len(test_args) != len(func_args):
            raise GeneratorInputError("%d: Invalid number of arguments in test "
                                      "%s. See function %s signature." %
                                      (line_no, test_name, function_name))
        expression_code += write_parameters(out_data_f, test_args, func_args,
                                            unique_expressions)

        # Write a newline as test case separator
        out_data_f.write('\n')

    dep_check_code, expression_code = gen_suite_dep_checks(
        suite_dependencies, dep_check_code, expression_code)
    return dep_check_code, expression_code
+
+
def add_input_info(funcs_file, data_file, template_file,
                   c_file, snippets):
    """
    Record the generator's input/output file names in snippets, so the
    generated source can mention where it came from.

    :param funcs_file: Functions file object
    :param data_file: Data file object
    :param template_file: Template file object
    :param c_file: Output C file object
    :param snippets: Dictionary to contain code pieces to be
           substituted in the template.
    :return:
    """
    snippets.update({
        'test_file': c_file,
        'test_main_file': template_file,
        'test_case_file': funcs_file,
        'test_case_data_file': data_file,
    })
+
+
def read_code_from_input_files(platform_file, helpers_file,
                               out_data_file, snippets):
    """
    Read code from input files and create substitutions for replacement
    strings in the template file.

    :param platform_file: Platform file object
    :param helpers_file: Helper functions file object
    :param out_data_file: Output intermediate data file object
    :param snippets: Dictionary to contain code pieces to be
           substituted in the template.
    :return:
    """
    # Read the helper and platform sources.
    with open(helpers_file, 'r') as help_f:
        helpers_code = help_f.read()
    with open(platform_file, 'r') as platform_f:
        platform_code = platform_f.read()
    snippets['test_common_helper_file'] = helpers_file
    snippets['test_common_helpers'] = helpers_code
    snippets['test_platform_file'] = platform_file
    # Embed the intermediate data file path into the platform code;
    # backslashes (Windows paths) are doubled so the path survives as
    # a C string literal.
    snippets['platform_code'] = platform_code.replace(
        'DATA_FILE', out_data_file.replace('\\', '\\\\'))
+
+
def write_test_source_file(template_file, c_file, snippets):
    """
    Write output source file with generated source code.

    :param template_file: Template file name
    :param c_file: Output source file
    :param snippets: Generated and code snippets, keyed by placeholder
           name (case-insensitive; upper-cased before substitution).
    :return:
    """

    # Create a placeholder pattern with the correct named capture groups
    # to override the default provided with Template.
    # Match nothing (no way of escaping placeholders).
    escaped = "(?P<escaped>(?!))"
    # Match the "__MBEDTLS_TEST_TEMPLATE__PLACEHOLDER_NAME" pattern.
    named = "__MBEDTLS_TEST_TEMPLATE__(?P<named>[A-Z][_A-Z0-9]*)"
    # Match nothing (no braced placeholder syntax).
    braced = "(?P<braced>(?!))"
    # If not already matched, a "__MBEDTLS_TEST_TEMPLATE__" prefix is invalid.
    invalid = "(?P<invalid>__MBEDTLS_TEST_TEMPLATE__)"
    placeholder_pattern = re.compile("|".join([escaped, named, braced, invalid]))

    # Substitute line by line so 'line_no' can track the output position.
    with open(template_file, 'r') as template_f, open(c_file, 'w') as c_f:
        for line_no, line in enumerate(template_f.readlines(), 1):
            # Update line number. +1 as #line directive sets next line number
            snippets['line_no'] = line_no + 1
            template = string.Template(line)
            template.pattern = placeholder_pattern
            # Placeholder names are upper-case in the template.
            snippets = {k.upper():v for (k, v) in snippets.items()}
            code = template.substitute(**snippets)
            c_f.write(code)
+
+
def parse_function_file(funcs_file, snippets):
    """
    Parse function file and generate function dispatch code.

    :param funcs_file: Functions file name
    :param snippets: Dictionary to contain code pieces to be
           substituted in the template.
    :return: suite dependencies and function info dict.
    """
    with FileWrapper(funcs_file) as funcs_f:
        # parse_functions consumes the whole file while it is open.
        parsed = parse_functions(funcs_f)
    suite_dependencies, dispatch_code, func_code, func_info = parsed
    snippets['functions_code'] = func_code
    snippets['dispatch_code'] = dispatch_code
    return suite_dependencies, func_info
+
+
def generate_intermediate_data_file(data_file, out_data_file,
                                    suite_dependencies, func_info, snippets):
    """
    Generates intermediate data file from input data file and
    information read from functions file.

    :param data_file: Data file name
    :param out_data_file: Output/Intermediate data file
    :param suite_dependencies: List of suite dependencies.
    :param func_info: Function info parsed from functions file.
    :param snippets: Dictionary to contain code pieces to be
           substituted in the template.
    :return:
    """
    # Translate the .data file to .datax while collecting the C code for
    # dependency and expression checks.
    with FileWrapper(data_file) as data_f, \
            open(out_data_file, 'w') as out_data_f:
        dep_check_code, expression_code = gen_from_test_data(
            data_f, out_data_f, func_info, suite_dependencies)
        snippets['dep_check_code'] = dep_check_code
        snippets['expression_code'] = expression_code
+
+
def generate_code(**input_info):
    """
    Generates C source code from test suite file, data file, common
    helpers file and platform file.

    input_info expands to following parameters:
    funcs_file: Functions file object
    data_file: Data file object
    template_file: Template file object
    platform_file: Platform file object
    helpers_file: Helper functions file object
    suites_dir: Test suites dir
    c_file: Output C file object
    out_data_file: Output intermediate data file object
    :return:
    :raises IOError: if any input file or directory does not exist.
    """
    funcs_file = input_info['funcs_file']
    data_file = input_info['data_file']
    template_file = input_info['template_file']
    platform_file = input_info['platform_file']
    helpers_file = input_info['helpers_file']
    suites_dir = input_info['suites_dir']
    c_file = input_info['c_file']
    out_data_file = input_info['out_data_file']
    # Fail early with a clear message if any input is missing.
    for name, path in [('Functions file', funcs_file),
                       ('Data file', data_file),
                       ('Template file', template_file),
                       ('Platform file', platform_file),
                       ('Helpers code file', helpers_file),
                       ('Suites dir', suites_dir)]:
        if not os.path.exists(path):
            raise IOError("ERROR: %s [%s] not found!" % (name, path))

    snippets = {'generator_script': os.path.basename(__file__)}
    # Collect all template substitutions, then render the output file.
    read_code_from_input_files(platform_file, helpers_file,
                               out_data_file, snippets)
    add_input_info(funcs_file, data_file, template_file,
                   c_file, snippets)
    suite_dependencies, func_info = parse_function_file(funcs_file, snippets)
    generate_intermediate_data_file(data_file, out_data_file,
                                    suite_dependencies, func_info, snippets)
    write_test_source_file(template_file, c_file, snippets)
+
+
def main():
    """
    Command line parser. Parses options, derives the output file names
    from the data file name, creates output directories as needed and
    runs the generator.

    :return:
    """
    parser = argparse.ArgumentParser(
        description='Dynamically generate test suite code.')

    parser.add_argument("-f", "--functions-file",
                        dest="funcs_file",
                        help="Functions file",
                        metavar="FUNCTIONS_FILE",
                        required=True)

    parser.add_argument("-d", "--data-file",
                        dest="data_file",
                        help="Data file",
                        metavar="DATA_FILE",
                        required=True)

    parser.add_argument("-t", "--template-file",
                        dest="template_file",
                        help="Template file",
                        metavar="TEMPLATE_FILE",
                        required=True)

    parser.add_argument("-s", "--suites-dir",
                        dest="suites_dir",
                        help="Suites dir",
                        metavar="SUITES_DIR",
                        required=True)

    parser.add_argument("--helpers-file",
                        dest="helpers_file",
                        help="Helpers file",
                        metavar="HELPERS_FILE",
                        required=True)

    parser.add_argument("-p", "--platform-file",
                        dest="platform_file",
                        help="Platform code file",
                        metavar="PLATFORM_FILE",
                        required=True)

    parser.add_argument("-o", "--out-dir",
                        dest="out_dir",
                        help="Dir where generated code and scripts are copied",
                        metavar="OUT_DIR",
                        required=True)

    args = parser.parse_args()

    # Output files are named after the data file, in the output dir.
    data_file_name = os.path.basename(args.data_file)
    data_name = os.path.splitext(data_file_name)[0]

    out_c_file = os.path.join(args.out_dir, data_name + '.c')
    out_data_file = os.path.join(args.out_dir, data_name + '.datax')

    out_c_file_dir = os.path.dirname(out_c_file)
    out_data_file_dir = os.path.dirname(out_data_file)
    for directory in [out_c_file_dir, out_data_file_dir]:
        if not os.path.exists(directory):
            os.makedirs(directory)

    generate_code(funcs_file=args.funcs_file, data_file=args.data_file,
                  template_file=args.template_file,
                  platform_file=args.platform_file,
                  helpers_file=args.helpers_file, suites_dir=args.suites_dir,
                  c_file=out_c_file, out_data_file=out_data_file)
+
+
if __name__ == "__main__":
    # Report generator input errors as a clean one-line exit message
    # instead of a Python traceback.
    try:
        main()
    except GeneratorInputError as err:
        sys.exit("%s: input error: %s" %
                 (os.path.basename(sys.argv[0]), str(err)))
diff --git a/tests/scripts/generate_tls13_compat_tests.py b/tests/scripts/generate_tls13_compat_tests.py
new file mode 100755
index 0000000..8b28590
--- /dev/null
+++ b/tests/scripts/generate_tls13_compat_tests.py
@@ -0,0 +1,657 @@
+#!/usr/bin/env python3
+
+# generate_tls13_compat_tests.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Generate TLSv1.3 Compat test cases
+
+"""
+
+import sys
+import os
+import argparse
+import itertools
+from collections import namedtuple
+
# Certificate configuration entry: the CA file plus the server's
# certificate/key pair.
Certificate = namedtuple("Certificate", ['cafile', 'certfile', 'keyfile'])
# Certificate parameters for each supported certificate signature algorithm.
CERTIFICATES = {
    'ecdsa_secp256r1_sha256': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp256r1.crt',
                                          'data_files/ecdsa_secp256r1.key'),
    'ecdsa_secp384r1_sha384': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp384r1.crt',
                                          'data_files/ecdsa_secp384r1.key'),
    'ecdsa_secp521r1_sha512': Certificate('data_files/test-ca2.crt',
                                          'data_files/ecdsa_secp521r1.crt',
                                          'data_files/ecdsa_secp521r1.key'),
    'rsa_pss_rsae_sha256': Certificate('data_files/test-ca_cat12.crt',
                                       'data_files/server2-sha256.crt', 'data_files/server2.key'
                                       )
}

# IANA code points for the TLS 1.3 cipher suites under test.
CIPHER_SUITE_IANA_VALUE = {
    "TLS_AES_128_GCM_SHA256": 0x1301,
    "TLS_AES_256_GCM_SHA384": 0x1302,
    "TLS_CHACHA20_POLY1305_SHA256": 0x1303,
    "TLS_AES_128_CCM_SHA256": 0x1304,
    "TLS_AES_128_CCM_8_SHA256": 0x1305
}

# IANA code points for the signature algorithms under test.
SIG_ALG_IANA_VALUE = {
    "ecdsa_secp256r1_sha256": 0x0403,
    "ecdsa_secp384r1_sha384": 0x0503,
    "ecdsa_secp521r1_sha512": 0x0603,
    'rsa_pss_rsae_sha256': 0x0804,
}

# IANA code points for the named (key exchange) groups under test.
NAMED_GROUP_IANA_VALUE = {
    'secp256r1': 0x17,
    'secp384r1': 0x18,
    'secp521r1': 0x19,
    'x25519': 0x1d,
    'x448': 0x1e,
    # Only one finite field group to keep testing time within reasonable bounds.
    'ffdhe2048': 0x100,
}
+
class TLSProgram:
    """
    Base class for generating a server/client command line. Subclasses
    override pre_cmd/cmd/pre_checks/post_checks for a specific TLS
    implementation.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, ciphersuite=None, signature_algorithm=None, named_group=None,
                 cert_sig_alg=None, compat_mode=True):
        self._ciphers = []
        self._sig_algs = []
        self._named_groups = []
        self._cert_sig_algs = []
        if ciphersuite:
            self.add_ciphersuites(ciphersuite)
        if named_group:
            self.add_named_groups(named_group)
        if signature_algorithm:
            self.add_signature_algorithms(signature_algorithm)
        if cert_sig_alg:
            self.add_cert_signature_algorithms(cert_sig_alg)
        self._compat_mode = compat_mode

    # add_ciphersuites should not be overridden by subclasses.
    # Appends only suites not already present (preserves order, no dupes).
    def add_ciphersuites(self, *ciphersuites):
        self._ciphers.extend(
            [cipher for cipher in ciphersuites if cipher not in self._ciphers])

    # add_signature_algorithms should not be overridden by subclasses.
    def add_signature_algorithms(self, *signature_algorithms):
        self._sig_algs.extend(
            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._sig_algs])

    # add_named_groups should not be overridden by subclasses.
    def add_named_groups(self, *named_groups):
        self._named_groups.extend(
            [named_group for named_group in named_groups if named_group not in self._named_groups])

    # add_cert_signature_algorithms should not be overridden by subclasses.
    def add_cert_signature_algorithms(self, *signature_algorithms):
        self._cert_sig_algs.extend(
            [sig_alg for sig_alg in signature_algorithms if sig_alg not in self._cert_sig_algs])

    # pylint: disable=no-self-use
    def pre_checks(self):
        # Shell 'requires_*' guards to run before the test; none by default.
        return []

    # pylint: disable=no-self-use
    def cmd(self):
        # Default to all known certificate signature algorithms when the
        # caller did not restrict them.
        if not self._cert_sig_algs:
            self._cert_sig_algs = list(CERTIFICATES.keys())
        return self.pre_cmd()

    # pylint: disable=no-self-use
    def post_checks(self):
        # Output checks to run after the test; none by default.
        return []

    # pylint: disable=no-self-use
    def pre_cmd(self):
        # Base command; subclasses must override ('false' fails loudly).
        return ['false']

    # pylint: disable=unused-argument,no-self-use
    def hrr_post_checks(self, named_group):
        # Checks specific to HelloRetryRequest tests; none by default.
        return []
+
+
class OpenSSLBase(TLSProgram):
    """
    Generate base test commands for OpenSSL.
    """

    # Map IANA group names to OpenSSL's -groups spellings.
    NAMED_GROUP = {
        'secp256r1': 'P-256',
        'secp384r1': 'P-384',
        'secp521r1': 'P-521',
        'x25519': 'X25519',
        'x448': 'X448',
        'ffdhe2048': 'ffdhe2048',
    }

    def cmd(self):
        ret = super().cmd()

        if self._ciphers:
            ciphersuites = ':'.join(self._ciphers)
            ret += ["-ciphersuites {ciphersuites}".format(ciphersuites=ciphersuites)]

        if self._sig_algs:
            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
            signature_algorithms = ':'.join(signature_algorithms)
            ret += ["-sigalgs {signature_algorithms}".format(
                signature_algorithms=signature_algorithms)]

        if self._named_groups:
            named_groups = ':'.join(
                map(lambda named_group: self.NAMED_GROUP[named_group], self._named_groups))
            ret += ["-groups {named_groups}".format(named_groups=named_groups)]

        ret += ['-msg -tls1_3']
        if not self._compat_mode:
            ret += ['-no_middlebox']

        return ret

    def pre_checks(self):
        ret = ["requires_openssl_tls1_3"]

        # ffdh groups require at least openssl 3.0
        ffdh_groups = ['ffdhe2048']

        if any(x in ffdh_groups for x in self._named_groups):
            # Replaces (not appends) the base check — presumably the
            # ffdh guard implies TLS 1.3 support; confirm in ssl-opt.sh.
            ret = ["requires_openssl_tls1_3_with_ffdh"]

        return ret
+
+
class OpenSSLServ(OpenSSLBase):
    """
    Generate test commands for OpenSSL server.
    """

    def cmd(self):
        ret = super().cmd()
        # Disable session resumption/tickets to keep handshakes independent.
        ret += ['-num_tickets 0 -no_resume_ephemeral -no_cache']
        return ret

    def post_checks(self):
        # The client must have received the server's HTTP response.
        return ['-c "HTTP/1.0 200 ok"']

    def pre_cmd(self):
        ret = ['$O_NEXT_SRV_NO_CERT']
        # Load one cert/key pair per requested certificate signature algorithm.
        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
            ret += ['-cert {cert} -key {key}'.format(cert=cert, key=key)]
        return ret
+
+
class OpenSSLCli(OpenSSLBase):
    """
    Generate test commands for OpenSSL client.
    """

    def pre_cmd(self):
        # Trust the CA of the first configured certificate signature algorithm.
        return ['$O_NEXT_CLI_NO_CERT',
                '-CAfile {cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]
+
+
class GnuTLSBase(TLSProgram):
    """
    Generate base test commands for GnuTLS.
    """

    # Map IANA cipher suite names to the GnuTLS priority-string tokens
    # that must all be enabled for that suite.
    CIPHER_SUITE = {
        'TLS_AES_256_GCM_SHA384': [
            'AES-256-GCM',
            'SHA384',
            'AEAD'],
        'TLS_AES_128_GCM_SHA256': [
            'AES-128-GCM',
            'SHA256',
            'AEAD'],
        'TLS_CHACHA20_POLY1305_SHA256': [
            'CHACHA20-POLY1305',
            'SHA256',
            'AEAD'],
        'TLS_AES_128_CCM_SHA256': [
            'AES-128-CCM',
            'SHA256',
            'AEAD'],
        'TLS_AES_128_CCM_8_SHA256': [
            'AES-128-CCM-8',
            'SHA256',
            'AEAD']}

    # Map IANA signature algorithm names to GnuTLS priority tokens.
    SIGNATURE_ALGORITHM = {
        'ecdsa_secp256r1_sha256': ['SIGN-ECDSA-SECP256R1-SHA256'],
        'ecdsa_secp521r1_sha512': ['SIGN-ECDSA-SECP521R1-SHA512'],
        'ecdsa_secp384r1_sha384': ['SIGN-ECDSA-SECP384R1-SHA384'],
        'rsa_pss_rsae_sha256': ['SIGN-RSA-PSS-RSAE-SHA256']}

    # Map IANA group names to GnuTLS priority tokens.
    NAMED_GROUP = {
        'secp256r1': ['GROUP-SECP256R1'],
        'secp384r1': ['GROUP-SECP384R1'],
        'secp521r1': ['GROUP-SECP521R1'],
        'x25519': ['GROUP-X25519'],
        'x448': ['GROUP-X448'],
        'ffdhe2048': ['GROUP-FFDHE2048'],
    }

    def pre_checks(self):
        return ["requires_gnutls_tls1_3",
                "requires_gnutls_next_no_ticket",
                "requires_gnutls_next_disable_tls13_compat", ]

    def cmd(self):
        """Build the GnuTLS command line, mainly a --priority string."""
        ret = super().cmd()

        priority_string_list = []

        def update_priority_string_list(items, map_table):
            # Yield priority tokens for 'items', skipping tokens already
            # present. Evaluated lazily by list.extend(), so tokens
            # appended earlier in the same call are deduplicated too.
            for item in items:
                for i in map_table[item]:
                    if i not in priority_string_list:
                        yield i

        if self._ciphers:
            priority_string_list.extend(update_priority_string_list(
                self._ciphers, self.CIPHER_SUITE))
        else:
            priority_string_list.extend(['CIPHER-ALL', 'MAC-ALL'])

        if self._sig_algs:
            signature_algorithms = set(self._sig_algs + self._cert_sig_algs)
            priority_string_list.extend(update_priority_string_list(
                signature_algorithms, self.SIGNATURE_ALGORITHM))
        else:
            priority_string_list.append('SIGN-ALL')


        if self._named_groups:
            priority_string_list.extend(update_priority_string_list(
                self._named_groups, self.NAMED_GROUP))
        else:
            priority_string_list.append('GROUP-ALL')

        priority_string_list = ['NONE'] + \
            priority_string_list + ['VERS-TLS1.3']

        priority_string = ':+'.join(priority_string_list)
        priority_string += ':%NO_TICKETS'

        if not self._compat_mode:
            # Bug fix: this used to do `priority_string += [...]`, i.e.
            # concatenate a list onto a str, which raises TypeError
            # whenever compat_mode is disabled.
            priority_string += ':%DISABLE_TLS13_COMPAT_MODE'

        ret += ['--priority={priority_string}'.format(
            priority_string=priority_string)]
        return ret
+
class GnuTLSServ(GnuTLSBase):
    """
    Generate test commands for GnuTLS server.
    """

    def pre_cmd(self):
        # HTTP mode gives the client a response body to check for.
        ret = ['$G_NEXT_SRV_NO_CERT', '--http', '--disable-client-cert', '--debug=4']
        for sig_alg in self._cert_sig_algs:
            _, cert, key = CERTIFICATES[sig_alg]
            ret.append('--x509certfile {cert} --x509keyfile {key}'.format(
                cert=cert, key=key))
        return ret

    def post_checks(self):
        # The client log must show the server's HTTP response line.
        return ['-c "HTTP/1.0 200 OK"']
+
+
class GnuTLSCli(GnuTLSBase):
    """
    Generate test commands for GnuTLS client.
    """

    def pre_cmd(self):
        # Trust the CA of the first configured server certificate; offer a
        # single key share so HRR behaviour stays predictable.
        cafile = CERTIFICATES[self._cert_sig_algs[0]].cafile
        return ['$G_NEXT_CLI_NO_CERT', '--debug=4', '--single-key-share',
                '--x509cafile {cafile}'.format(cafile=cafile)]
+
+
class MbedTLSBase(TLSProgram):
    """
    Generate base test commands for mbedTLS.
    """

    # Map IANA cipher suite names to the names used by ssl_client2/ssl_server2.
    CIPHER_SUITE = {
        'TLS_AES_256_GCM_SHA384': 'TLS1-3-AES-256-GCM-SHA384',
        'TLS_AES_128_GCM_SHA256': 'TLS1-3-AES-128-GCM-SHA256',
        'TLS_CHACHA20_POLY1305_SHA256': 'TLS1-3-CHACHA20-POLY1305-SHA256',
        'TLS_AES_128_CCM_SHA256': 'TLS1-3-AES-128-CCM-SHA256',
        'TLS_AES_128_CCM_8_SHA256': 'TLS1-3-AES-128-CCM-8-SHA256'}

    def cmd(self):
        """Build the argument list common to ssl_client2 and ssl_server2."""
        ret = super().cmd()
        ret += ['debug_level=4']

        if self._ciphers:
            ciphers = ','.join(
                map(lambda cipher: self.CIPHER_SUITE[cipher], self._ciphers))
            ret += ["force_ciphersuite={ciphers}".format(ciphers=ciphers)]

        if self._sig_algs + self._cert_sig_algs:
            # NOTE(review): set iteration order is not deterministic across
            # interpreter runs; consider sorted() for reproducible output.
            ret += ['sig_algs={sig_algs}'.format(
                sig_algs=','.join(set(self._sig_algs + self._cert_sig_algs)))]

        if self._named_groups:
            named_groups = ','.join(self._named_groups)
            ret += ["groups={named_groups}".format(named_groups=named_groups)]
        return ret

    #pylint: disable=missing-function-docstring
    def add_ffdh_group_requirements(self, requirement_list):
        # Each FFDH group depends on its own PSA_WANT_DH_RFC7919_xxxx config
        # option. (The previous code appended the 2048-bit requirement for
        # every group size - a copy-paste defect.)
        for group in ('ffdhe2048', 'ffdhe3072', 'ffdhe4096',
                      'ffdhe6144', 'ffdhe8192'):
            if group in self._named_groups:
                requirement_list.append(
                    'requires_config_enabled PSA_WANT_DH_RFC7919_'
                    + group[len('ffdhe'):])

    def pre_checks(self):
        """Return the ssl-opt.sh requires_* lines implied by this configuration."""
        ret = ['requires_config_enabled MBEDTLS_DEBUG_C',
               'requires_config_enabled MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_EPHEMERAL_ENABLED']

        if self._compat_mode:
            ret += ['requires_config_enabled MBEDTLS_SSL_TLS1_3_COMPATIBILITY_MODE']

        if 'rsa_pss_rsae_sha256' in self._sig_algs + self._cert_sig_algs:
            ret.append(
                'requires_config_enabled MBEDTLS_X509_RSASSA_PSS_SUPPORT')

        ec_groups = ['secp256r1', 'secp384r1', 'secp521r1', 'x25519', 'x448']
        ffdh_groups = ['ffdhe2048', 'ffdhe3072', 'ffdhe4096', 'ffdhe6144', 'ffdhe8192']

        if any(x in ec_groups for x in self._named_groups):
            ret.append('requires_config_enabled PSA_WANT_ALG_ECDH')

        if any(x in ffdh_groups for x in self._named_groups):
            ret.append('requires_config_enabled PSA_WANT_ALG_FFDH')
            self.add_ffdh_group_requirements(ret)

        return ret
+
+
class MbedTLSServ(MbedTLSBase):
    """
    Generate test commands for mbedTLS server.
    """

    def cmd(self):
        """Build the full ssl_server2 argument list."""
        ret = super().cmd()
        # Ephemeral key exchange only; cookies/tickets disabled so the
        # handshake transcript is predictable for the log checks below.
        ret += ['tls13_kex_modes=ephemeral cookies=0 tickets=0']
        return ret

    def pre_checks(self):
        return ['requires_config_enabled MBEDTLS_SSL_SRV_C'] + super().pre_checks()

    def post_checks(self):
        """Return ssl-opt.sh `-s` patterns that must appear in the server log."""
        check_strings = ["Protocol is TLSv1.3"]
        if self._ciphers:
            check_strings.append(
                "server hello, chosen ciphersuite: {} ( id={:04d} )".format(
                    self.CIPHER_SUITE[self._ciphers[0]],
                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]]))
        if self._sig_algs:
            check_strings.append(
                "received signature algorithm: 0x{:x}".format(
                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))

        for named_group in self._named_groups:
            check_strings += ['got named group: {named_group}({iana_value:04x})'.format(
                named_group=named_group,
                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]

        # Client authentication is disabled in these tests.
        check_strings.append("Certificate verification was skipped")
        return ['-s "{}"'.format(i) for i in check_strings]

    def pre_cmd(self):
        """Return the server invocation prefix with certificate/key options."""
        ret = ['$P_SRV']
        for _, cert, key in map(lambda sig_alg: CERTIFICATES[sig_alg], self._cert_sig_algs):
            ret += ['crt_file={cert} key_file={key}'.format(cert=cert, key=key)]
        return ret

    def hrr_post_checks(self, named_group):
        """Return the pattern confirming the server selected `named_group` via HRR."""
        return ['-s "HRR selected_group: {:s}"'.format(named_group)]
+
+
class MbedTLSCli(MbedTLSBase):
    """
    Generate test commands for mbedTLS client.
    """

    def pre_cmd(self):
        """Return the client invocation prefix with the trusted CA file."""
        return ['$P_CLI',
                'ca_file={cafile}'.format(cafile=CERTIFICATES[self._cert_sig_algs[0]].cafile)]

    def pre_checks(self):
        return ['requires_config_enabled MBEDTLS_SSL_CLI_C'] + super().pre_checks()

    def hrr_post_checks(self, named_group):
        """Return `-c` patterns confirming the HRR exchange in the client log."""
        ret = ['-c "received HelloRetryRequest message"']
        ret += ['-c "selected_group ( {:d} )"'.format(NAMED_GROUP_IANA_VALUE[named_group])]
        return ret

    def post_checks(self):
        """Return ssl-opt.sh `-c` patterns that must appear in the client log."""
        check_strings = ["Protocol is TLSv1.3"]
        if self._ciphers:
            check_strings.append(
                "server hello, chosen ciphersuite: ( {:04x} ) - {}".format(
                    CIPHER_SUITE_IANA_VALUE[self._ciphers[0]],
                    self.CIPHER_SUITE[self._ciphers[0]]))
        if self._sig_algs:
            check_strings.append(
                "Certificate Verify: Signature algorithm ( {:04x} )".format(
                    SIG_ALG_IANA_VALUE[self._sig_algs[0]]))

        for named_group in self._named_groups:
            check_strings += ['NamedGroup: {named_group} ( {iana_value:x} )'.format(
                named_group=named_group,
                iana_value=NAMED_GROUP_IANA_VALUE[named_group])]

        check_strings.append("Verifying peer X.509 certificate... ok")
        return ['-c "{}"'.format(i) for i in check_strings]
+
+
# Program name (as accepted on the command line) -> command-generator class.
SERVER_CLASSES = {'OpenSSL': OpenSSLServ, 'GnuTLS': GnuTLSServ, 'mbedTLS': MbedTLSServ}
CLIENT_CLASSES = {'OpenSSL': OpenSSLCli, 'GnuTLS': GnuTLSCli, 'mbedTLS': MbedTLSCli}
+
+
def generate_compat_test(client=None, server=None, cipher=None, named_group=None, sig_alg=None):
    """
    Generate test case with `ssl-opt.sh` format.
    """
    # cipher[4:] drops the leading 'TLS_' to keep the test name short.
    name = 'TLS 1.3 {client[0]}->{server[0]}: {cipher},{named_group},{sig_alg}'.format(
        client=client, server=server, cipher=cipher[4:], sig_alg=sig_alg, named_group=named_group)

    server_object = SERVER_CLASSES[server](ciphersuite=cipher,
                                           named_group=named_group,
                                           signature_algorithm=sig_alg,
                                           cert_sig_alg=sig_alg)
    client_object = CLIENT_CLASSES[client](ciphersuite=cipher,
                                           named_group=named_group,
                                           signature_algorithm=sig_alg,
                                           cert_sig_alg=sig_alg)

    cmd = ['run_test "{}"'.format(name),
           '"{}"'.format(' '.join(server_object.cmd())),
           '"{}"'.format(' '.join(client_object.cmd())),
           '0']
    cmd += server_object.post_checks()
    cmd += client_object.post_checks()
    # -C: HelloRetryRequest must NOT appear in this (non-HRR) test.
    cmd += ['-C "received HelloRetryRequest message"']
    # Join the run_test arguments with ssl-opt.sh's continuation layout.
    prefix = ' \\\n' + (' '*9)
    cmd = prefix.join(cmd)
    return '\n'.join(server_object.pre_checks() + client_object.pre_checks() + [cmd])
+
+
def generate_hrr_compat_test(client=None, server=None,
                             client_named_group=None, server_named_group=None,
                             cert_sig_alg=None):
    """
    Generate Hello Retry Request test case with `ssl-opt.sh` format.
    """
    name = 'TLS 1.3 {client[0]}->{server[0]}: HRR {c_named_group} -> {s_named_group}'.format(
        client=client, server=server, c_named_group=client_named_group,
        s_named_group=server_named_group)
    server_object = SERVER_CLASSES[server](named_group=server_named_group,
                                           cert_sig_alg=cert_sig_alg)

    client_object = CLIENT_CLASSES[client](named_group=client_named_group,
                                           cert_sig_alg=cert_sig_alg)
    # The client must also support the server's group so the handshake can
    # complete after the HelloRetryRequest round-trip.
    client_object.add_named_groups(server_named_group)

    cmd = ['run_test "{}"'.format(name),
           '"{}"'.format(' '.join(server_object.cmd())),
           '"{}"'.format(' '.join(client_object.cmd())),
           '0']
    cmd += server_object.post_checks()
    cmd += client_object.post_checks()
    cmd += server_object.hrr_post_checks(server_named_group)
    cmd += client_object.hrr_post_checks(server_named_group)
    # Join the run_test arguments with ssl-opt.sh's continuation layout.
    prefix = ' \\\n' + (' '*9)
    cmd = prefix.join(cmd)
    return '\n'.join(server_object.pre_checks() +
                     client_object.pre_checks() +
                     [cmd])
+
# Header prepended to the generated ssl-opt-style script. main() formats it
# with `filename` (the output file's basename) and `cmd` (the generating
# command line), so both placeholders must be present in the template.
SSL_OUTPUT_HEADER = '''#!/bin/sh

# {filename}
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# Purpose
#
# List TLS1.3 compat test cases. They are generated by
# `{cmd}`.
#
# PLEASE DO NOT EDIT THIS FILE. IF NEEDED, PLEASE MODIFY `generate_tls13_compat_tests.py`
# AND REGENERATE THIS FILE.
#
'''
+
def main():
    """
    Main function of this program
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('-o', '--output', nargs='?',
                        default=None, help='Output file path if `-a` was set')

    parser.add_argument('-a', '--generate-all-tls13-compat-tests', action='store_true',
                        default=False, help='Generate all available tls13 compat tests')

    parser.add_argument('--list-ciphers', action='store_true',
                        default=False, help='List supported ciphersuites')

    parser.add_argument('--list-sig-algs', action='store_true',
                        default=False, help='List supported signature algorithms')

    parser.add_argument('--list-named-groups', action='store_true',
                        default=False, help='List supported named groups')

    parser.add_argument('--list-servers', action='store_true',
                        default=False, help='List supported TLS servers')

    parser.add_argument('--list-clients', action='store_true',
                        default=False, help='List supported TLS Clients')

    parser.add_argument('server', choices=SERVER_CLASSES.keys(), nargs='?',
                        default=list(SERVER_CLASSES.keys())[0],
                        help='Choose TLS server program for test')
    parser.add_argument('client', choices=CLIENT_CLASSES.keys(), nargs='?',
                        default=list(CLIENT_CLASSES.keys())[0],
                        help='Choose TLS client program for test')
    parser.add_argument('cipher', choices=CIPHER_SUITE_IANA_VALUE.keys(), nargs='?',
                        default=list(CIPHER_SUITE_IANA_VALUE.keys())[0],
                        help='Choose cipher suite for test')
    # Bug fix: the two help strings below used to be copy-pasted from the
    # 'cipher' argument ("Choose cipher suite for test").
    parser.add_argument('sig_alg', choices=SIG_ALG_IANA_VALUE.keys(), nargs='?',
                        default=list(SIG_ALG_IANA_VALUE.keys())[0],
                        help='Choose signature algorithm for test')
    parser.add_argument('named_group', choices=NAMED_GROUP_IANA_VALUE.keys(), nargs='?',
                        default=list(NAMED_GROUP_IANA_VALUE.keys())[0],
                        help='Choose named group for test')

    args = parser.parse_args()

    def get_all_test_cases():
        # Generate normal compat test cases. Only combinations involving
        # mbedTLS are of interest; pure OpenSSL<->GnuTLS interop is skipped.
        for client, server, cipher, named_group, sig_alg in \
            itertools.product(CLIENT_CLASSES.keys(),
                              SERVER_CLASSES.keys(),
                              CIPHER_SUITE_IANA_VALUE.keys(),
                              NAMED_GROUP_IANA_VALUE.keys(),
                              SIG_ALG_IANA_VALUE.keys()):
            if server == 'mbedTLS' or client == 'mbedTLS':
                yield generate_compat_test(client=client, server=server,
                                           cipher=cipher, named_group=named_group,
                                           sig_alg=sig_alg)

        # Generate Hello Retry Request compat test cases: the client first
        # offers client_named_group while the server only accepts
        # server_named_group, forcing an HRR round-trip.
        for client, server, client_named_group, server_named_group in \
            itertools.product(CLIENT_CLASSES.keys(),
                              SERVER_CLASSES.keys(),
                              NAMED_GROUP_IANA_VALUE.keys(),
                              NAMED_GROUP_IANA_VALUE.keys()):

            if (client == 'mbedTLS' or server == 'mbedTLS') and \
                    client_named_group != server_named_group:
                yield generate_hrr_compat_test(client=client, server=server,
                                               client_named_group=client_named_group,
                                               server_named_group=server_named_group,
                                               cert_sig_alg="ecdsa_secp256r1_sha256")

    if args.generate_all_tls13_compat_tests:
        if args.output:
            with open(args.output, 'w', encoding="utf-8") as f:
                f.write(SSL_OUTPUT_HEADER.format(
                    filename=os.path.basename(args.output), cmd=' '.join(sys.argv)))
                f.write('\n\n'.join(get_all_test_cases()))
                f.write('\n')
        else:
            print('\n\n'.join(get_all_test_cases()))
        return 0

    if args.list_ciphers or args.list_sig_algs or args.list_named_groups \
            or args.list_servers or args.list_clients:
        if args.list_ciphers:
            print(*CIPHER_SUITE_IANA_VALUE.keys())
        if args.list_sig_algs:
            print(*SIG_ALG_IANA_VALUE.keys())
        if args.list_named_groups:
            print(*NAMED_GROUP_IANA_VALUE.keys())
        if args.list_servers:
            print(*SERVER_CLASSES.keys())
        if args.list_clients:
            print(*CLIENT_CLASSES.keys())
        return 0

    # Default action: print a single test case for the given combination.
    print(generate_compat_test(server=args.server, client=args.client, sig_alg=args.sig_alg,
                               cipher=args.cipher, named_group=args.named_group))
    return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tests/scripts/list-identifiers.sh b/tests/scripts/list-identifiers.sh
new file mode 100755
index 0000000..4ccac23
--- /dev/null
+++ b/tests/scripts/list-identifiers.sh
@@ -0,0 +1,54 @@
#!/bin/bash
#
# Create a file named identifiers containing identifiers from internal header
# files, based on the --internal flag.
# Outputs the line count of the file to stdout.
# A very thin wrapper around list_internal_identifiers.py for backwards
# compatibility.
# Must be run from Mbed TLS root.
#
# Usage: list-identifiers.sh [ -i | --internal ]
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -eu

if [ -d include/mbedtls ]; then :; else
    echo "$0: Must be run from Mbed TLS root" >&2
    exit 1
fi

INTERNAL=""

until [ -z "${1-}" ]
do
    case "$1" in
        -i|--internal)
            INTERNAL="1"
            ;;
        *)
            # Report unknown options on stderr, like the other errors above.
            echo "Unknown argument: '$1'" >&2
            exit 1
            ;;
    esac
    shift
done

# Quote and use an explicit -n test instead of the bare [ $INTERNAL ].
if [ -n "$INTERNAL" ]
then
    tests/scripts/list_internal_identifiers.py
    wc -l identifiers
else
    cat <<EOF
Sorry, this script has to be called with --internal.

This script exists solely for backwards compatibility with the previous
iteration of list-identifiers.sh, of which only the --internal option remains in
use. It is a thin wrapper around list_internal_identifiers.py.

check-names.sh, which used to depend on this script, has been replaced with
check_names.py and is now self-complete.
EOF
fi
diff --git a/tests/scripts/list_internal_identifiers.py b/tests/scripts/list_internal_identifiers.py
new file mode 100755
index 0000000..b648ce2
--- /dev/null
+++ b/tests/scripts/list_internal_identifiers.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+This script generates a file called identifiers that contains all Mbed TLS
+identifiers found on internal headers. This is the equivalent of what was
+previously `list-identifiers.sh --internal`, and is useful for generating an
+exclusion file list for ABI/API checking, since we do not promise compatibility
+for them.
+
+It uses the CodeParser class from check_names.py to perform the parsing.
+
+The script returns 0 on success, 1 if there is a script error.
+Must be run from Mbed TLS root.
+"""
+
+import argparse
+import logging
+from check_names import CodeParser
+
def main():
    """Write all identifiers parsed from internal headers to "identifiers"."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=(
            "This script writes a list of parsed identifiers in internal "
            "headers to \"identifiers\". This is useful for generating a list "
            "of names to exclude from API/ABI compatibility checking. "))

    # No options are accepted; calling parse_args() still provides --help
    # and rejects unexpected arguments.
    parser.parse_args()

    name_check = CodeParser(logging.getLogger())
    # parse_identifiers() returns a sequence; element 0 holds the identifier
    # matches - presumably the rest is ancillary data (see check_names.py).
    result = name_check.parse_identifiers([
        "include/mbedtls/*_internal.h",
        "library/*.h"
    ])[0]
    result.sort(key=lambda x: x.name)

    identifiers = ["{}\n".format(match.name) for match in result]
    with open("identifiers", "w", encoding="utf-8") as f:
        f.writelines(identifiers)

if __name__ == "__main__":
    main()
diff --git a/tests/scripts/psa_collect_statuses.py b/tests/scripts/psa_collect_statuses.py
new file mode 100755
index 0000000..11bbebc
--- /dev/null
+++ b/tests/scripts/psa_collect_statuses.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+"""Describe the test coverage of PSA functions in terms of return statuses.
+
+1. Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG
+2. Run psa_collect_statuses.py
+
+The output is a series of line of the form "psa_foo PSA_ERROR_XXX". Each
+function/status combination appears only once.
+
+This script must be run from the top of an Mbed TLS source tree.
+The build command is "make -DRECORD_PSA_STATUS_COVERAGE_LOG", which is
+only supported with make (as opposed to CMake or other build methods).
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import subprocess
+import sys
+
+DEFAULT_STATUS_LOG_FILE = 'tests/statuses.log'
+DEFAULT_PSA_CONSTANT_NAMES = 'programs/psa/psa_constant_names'
+
class Statuses:
    """Information about observed return statues of API functions."""

    def __init__(self):
        # function name -> {status value (str) -> list of log line tails}
        self.functions = {}
        # All observed status values, as integers.
        self.codes = set()
        # status value (str) -> symbolic constant name
        self.status_names = {}

    def collect_log(self, log_file_name):
        """Read logs from RECORD_PSA_STATUS_COVERAGE_LOG.

        Read logs produced by running Mbed TLS test suites built with
        -DRECORD_PSA_STATUS_COVERAGE_LOG.
        """
        with open(log_file_name) as log:
            for line in log:
                # Each line is "<status>:<function>:<rest of the record>".
                value, function, tail = line.split(':', 2)
                per_function = self.functions.setdefault(function, {})
                per_function.setdefault(value, []).append(tail)
                self.codes.add(int(value))

    def get_constant_names(self, psa_constant_names):
        """Run psa_constant_names to obtain names for observed numerical values."""
        values = [str(value) for value in self.codes]
        raw = subprocess.check_output([psa_constant_names, 'status'] + values)
        names = raw.decode('ascii').rstrip().split('\n')
        for value, name in zip(values, names):
            self.status_names[value] = name

    def report(self):
        """Report observed return values for each function.

        The report is a series of line of the form "psa_foo PSA_ERROR_XXX".
        """
        for function in sorted(self.functions):
            names = [self.status_names[value]
                     for value in self.functions[function]]
            for name in sorted(names):
                sys.stdout.write('{} {}\n'.format(function, name))
+
def collect_status_logs(options):
    """Build and run unit tests and report observed function return statuses.

    Build Mbed TLS with -DRECORD_PSA_STATUS_COVERAGE_LOG, run the
    test suites and display information about observed return statuses.
    """
    rebuilt = False
    if not options.use_existing_log and os.path.exists(options.log_file):
        os.remove(options.log_file)
    if not os.path.exists(options.log_file):
        if options.clean_before:
            subprocess.check_call(['make', 'clean'],
                                  cwd='tests',
                                  stdout=sys.stderr)
        # "make -q" only queries whether the targets are up to date; a
        # nonzero status means a (re)build is needed.
        with open(os.devnull, 'w') as devnull:
            make_q_ret = subprocess.call(['make', '-q', 'lib', 'tests'],
                                         stdout=devnull, stderr=devnull)
        if make_q_ret != 0:
            subprocess.check_call(['make', 'RECORD_PSA_STATUS_COVERAGE_LOG=1'],
                                  stdout=sys.stderr)
            rebuilt = True
        subprocess.check_call(['make', 'test'],
                              stdout=sys.stderr)
    data = Statuses()
    data.collect_log(options.log_file)
    data.get_constant_names(options.psa_constant_names)
    # Only clean up if we were the ones who rebuilt with coverage logging.
    if rebuilt and options.clean_after:
        subprocess.check_call(['make', 'clean'],
                              cwd='tests',
                              stdout=sys.stderr)
    return data
+
def main():
    """Parse command-line options, collect status logs and print the report."""
    parser = argparse.ArgumentParser(description=globals()['__doc__'])
    parser.add_argument('--clean-after',
                        action='store_true',
                        help='Run "make clean" after rebuilding')
    parser.add_argument('--clean-before',
                        action='store_true',
                        help='Run "make clean" before regenerating the log file)')
    parser.add_argument('--log-file', metavar='FILE',
                        default=DEFAULT_STATUS_LOG_FILE,
                        help='Log file location (default: {})'.format(
                            DEFAULT_STATUS_LOG_FILE
                        ))
    parser.add_argument('--psa-constant-names', metavar='PROGRAM',
                        default=DEFAULT_PSA_CONSTANT_NAMES,
                        help='Path to psa_constant_names (default: {})'.format(
                            DEFAULT_PSA_CONSTANT_NAMES
                        ))
    parser.add_argument('--use-existing-log', '-e',
                        action='store_true',
                        help='Don\'t regenerate the log file if it exists')
    options = parser.parse_args()
    data = collect_status_logs(options)
    data.report()

if __name__ == '__main__':
    main()
diff --git a/tests/scripts/quiet/cmake b/tests/scripts/quiet/cmake
new file mode 100755
index 0000000..a34365b
--- /dev/null
+++ b/tests/scripts/quiet/cmake
@@ -0,0 +1,19 @@
#! /usr/bin/env bash
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# This swallows the output of the wrapped tool, unless there is an error.
# This helps reduce excess logging in the CI.

# If you are debugging a build / CI issue, you can get complete unsilenced logs
# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):

# export VERBOSE_LOGS=1

# don't silence invocations containing these arguments
# (quiet.sh matches this as a regex against the space-delimited argument list)
NO_SILENCE=" --version "

TOOL="cmake"

. "$(dirname "$0")/quiet.sh"
diff --git a/tests/scripts/quiet/make b/tests/scripts/quiet/make
new file mode 100755
index 0000000..920e5b8
--- /dev/null
+++ b/tests/scripts/quiet/make
@@ -0,0 +1,19 @@
#! /usr/bin/env bash
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# This swallows the output of the wrapped tool, unless there is an error.
# This helps reduce excess logging in the CI.

# If you are debugging a build / CI issue, you can get complete unsilenced logs
# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):

# export VERBOSE_LOGS=1

# don't silence invocations containing these arguments
# (quiet.sh matches this as a regex, so "make lib test" stays verbose)
NO_SILENCE=" --version | test "

TOOL="make"

. "$(dirname "$0")/quiet.sh"
diff --git a/tests/scripts/quiet/quiet.sh b/tests/scripts/quiet/quiet.sh
new file mode 100644
index 0000000..0f26184
--- /dev/null
+++ b/tests/scripts/quiet/quiet.sh
@@ -0,0 +1,79 @@
# -*-mode: sh; sh-shell: bash -*-
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
#
# This swallows the output of the wrapped tool, unless there is an error.
# This helps reduce excess logging in the CI.

# If you are debugging a build / CI issue, you can get complete unsilenced logs
# by un-commenting the following line (or setting VERBOSE_LOGS in your environment):
#
# VERBOSE_LOGS=1
#
# This script provides most of the functionality for the adjacent make and cmake
# wrappers.
#
# It requires two variables to be set:
#
# TOOL - the name of the tool that is being wrapped (with no path), e.g. "make"
#
# NO_SILENCE - a regex that describes the commandline arguments for which output will not
#              be silenced, e.g. " --version | test ". In this example, "make lib test" will
#              not be silent, but "make lib" will be.

# Identify path to original tool. There is an edge-case here where the quiet wrapper is on the path via
# a symlink or relative path, but "type -ap" yields the wrapper with its normalised path. We use
# the -ef operator to compare paths, to avoid picking the wrapper in this case (to avoid infinitely
# recursing).
while IFS= read -r ORIGINAL_TOOL; do
    if ! [[ $ORIGINAL_TOOL -ef "$0" ]]; then break; fi
done < <(type -ap -- "$TOOL")

print_quoted_args() {
    # similar to printf '%q' "$@"
    # but produce more human-readable results for common/simple cases like "a b"
    for a in "$@"; do
        # Get bash to quote the string
        printf -v q '%q' "$a"
        simple_pattern="^([-[:alnum:]_+./:@]+=)?([^']*)$"
        if [[ "$a" != "$q" && $a =~ $simple_pattern ]]; then
            # a requires some quoting (a != q), but has no single quotes, so we can
            # simplify the quoted form - e.g.:
            #   a b        -> 'a b'
            #   CFLAGS=a b -> CFLAGS='a b'
            q="${BASH_REMATCH[1]}'${BASH_REMATCH[2]}'"
        fi
        printf " %s" "$q"
    done
}

if [[ ! " $* " =~ " --version " ]]; then
    # Display the command being invoked - if it succeeds, this is all that will
    # be displayed. Don't do this for invocations with --version, because
    # this output is often parsed by scripts, so we don't want to modify it.
    printf %s "${TOOL}" 1>&2
    print_quoted_args "$@" 1>&2
    echo 1>&2
fi

# Use "$*" (not "$@") so the arguments form a single string for the regex
# match (ShellCheck SC2199 flags the implicit array concatenation). The
# ${VERBOSE_LOGS:-} default keeps this safe if the caller runs under set -u.
if [[ " $* " =~ $NO_SILENCE || -n "${VERBOSE_LOGS:-}" ]]; then
    # Run original command with no output supression
    exec "${ORIGINAL_TOOL}" "$@"
else
    # Run original command and capture output & exit status
    TMPFILE=$(mktemp "quiet-${TOOL}.XXXXXX") || exit 1
    # Remove the temporary file on any exit path, including signals.
    trap 'rm -f -- "${TMPFILE}"' EXIT

    "${ORIGINAL_TOOL}" "$@" > "${TMPFILE}" 2>&1
    EXIT_STATUS=$?

    if [[ $EXIT_STATUS -ne 0 ]]; then
        # On error, display the full output
        cat "${TMPFILE}"
    fi

    # Propagate the exit status
    exit $EXIT_STATUS
fi
diff --git a/tests/scripts/recursion.pl b/tests/scripts/recursion.pl
new file mode 100755
index 0000000..3cdeff7
--- /dev/null
+++ b/tests/scripts/recursion.pl
@@ -0,0 +1,47 @@
#!/usr/bin/env perl

# Find functions making recursive calls to themselves.
# (Multiple recursion where a() calls b() which calls a() not covered.)
#
# When the recursion depth might depend on data controlled by the attacker in
# an unbounded way, those functions should use iteration instead.
#
# Typical usage: scripts/recursion.pl library/*.c
#
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

use warnings;
use strict;

use utf8;
use open qw(:std utf8);

# exclude functions that are ok:
# - mpi_write_hlp: bounded by size of mbedtls_mpi, a compile-time constant
# - x509_crt_verify_child: bounded by MBEDTLS_X509_MAX_INTERMEDIATE_CA
my $known_ok = qr/mpi_write_hlp|x509_crt_verify_child/;

my $cur_name;   # candidate/current function name, once inside a body
my $inside;     # true while between a function's column-0 braces
my @funcs;      # functions found to call themselves

die "Usage: $0 file.c [...]\n" unless @ARGV;

while (<>)
{
    if( /^[^\/#{}\s]/ && ! /\[.*]/ ) {
        # Column-0 line that is not a comment, preprocessor directive or
        # brace: remember it as a possible function signature (skip lines
        # with brackets, e.g. array definitions).
        chomp( $cur_name = $_ ) unless $inside;
    } elsif( /^{/ && $cur_name ) {
        # Opening brace at column 0: entering the remembered function's
        # body; reduce the signature to just the function name.
        $inside = 1;
        $cur_name =~ s/.* ([^ ]*)\(.*/$1/;
    } elsif( /^}/ && $inside ) {
        # Closing brace at column 0: leaving the function body.
        undef $inside;
        undef $cur_name;
    } elsif( $inside && /\b\Q$cur_name\E\([^)]/ ) {
        # A call to the function's own name inside its body: recursion.
        push @funcs, $cur_name unless /$known_ok/;
    }
}

print "$_\n" for @funcs;
exit @funcs;   # exit status = number of offenders found
diff --git a/tests/scripts/run-metatests.sh b/tests/scripts/run-metatests.sh
new file mode 100755
index 0000000..22a302c
--- /dev/null
+++ b/tests/scripts/run-metatests.sh
@@ -0,0 +1,89 @@
#!/bin/sh

help () {
    cat <<EOF
Usage: $0 [OPTION] [PLATFORM]...
Run all the metatests whose platform matches any of the given PLATFORM.
A PLATFORM can contain shell wildcards.

Expected output: a lot of scary-looking error messages, since each
metatest is expected to report a failure. The final line should be
"Ran N metatests, all good."

If something goes wrong: the final line should be
"Ran N metatests, X unexpected successes". Look for "Unexpected success"
in the logs above.

 -l List the available metatests, don't run them.
EOF
}

# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later

set -e -u

# Locate the metatest program relative to the current directory, so the
# script works from the source root, tests/, or a subdirectory of tests/.
if [ -d programs ]; then
    METATEST_PROGRAM=programs/test/metatest
elif [ -d ../programs ]; then
    METATEST_PROGRAM=../programs/test/metatest
elif [ -d ../../programs ]; then
    METATEST_PROGRAM=../../programs/test/metatest
else
    echo >&2 "$0: FATAL: programs/test/metatest not found"
    exit 120
fi

LIST_ONLY=
while getopts hl OPTLET; do
    case $OPTLET in
        h) help; exit;;
        l) LIST_ONLY=1;;
        \?) help >&2; exit 120;;
    esac
done
shift $((OPTIND - 1))

# Read "name platform ..." lines on stdin and print the names whose platform
# matches one of the patterns given as arguments. $pattern is deliberately
# unquoted so "case" treats it as a shell pattern (wildcards allowed).
list_matches () {
    while read name platform junk; do
        for pattern in "$@"; do
            case $platform in
                $pattern) echo "$name"; break;;
            esac
        done
    done
}

count=0
errors=0
# Run one metatest. Each metatest is expected to FAIL; an exit status of 0
# is the anomaly we are looking for.
run_metatest () {
    ret=0
    "$METATEST_PROGRAM" "$1" || ret=$?
    if [ $ret -eq 0 ]; then
        echo >&2 "$0: Unexpected success: $1"
        errors=$((errors + 1))
    fi
    count=$((count + 1))
}

# Don't pipe the output of metatest so that if it fails, this script exits
# immediately with a failure status.
full_list=$("$METATEST_PROGRAM" list)
matching_list=$(printf '%s\n' "$full_list" | list_matches "$@")

if [ -n "$LIST_ONLY" ]; then
    # Word splitting of $matching_list is intentional: one name per line.
    printf '%s\n' $matching_list
    exit
fi

for name in $matching_list; do
    run_metatest "$name"
done

if [ $errors -eq 0 ]; then
    echo "Ran $count metatests, all good."
    exit 0
else
    echo "Ran $count metatests, $errors unexpected successes."
    exit 1
fi
diff --git a/tests/scripts/run-test-suites.pl b/tests/scripts/run-test-suites.pl
new file mode 100755
index 0000000..e0ee3f5
--- /dev/null
+++ b/tests/scripts/run-test-suites.pl
@@ -0,0 +1,158 @@
+#!/usr/bin/env perl
+
+# run-test-suites.pl
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+=head1 SYNOPSIS
+
+Execute all the test suites and print a summary of the results.
+
+ run-test-suites.pl [[-v|--verbose] [VERBOSITY]] [--skip=SUITE[...]]
+
+Options:
+
+ -v|--verbose Print detailed failure information.
+ -v 2|--verbose=2 Print detailed failure information and summary messages.
+ -v 3|--verbose=3 Print detailed information about every test case.
+ --skip=SUITE[,SUITE...]
+ Skip the specified SUITE(s). This option can be used
+ multiple times.
+
+=cut
+
+use warnings;
+use strict;
+
+use utf8;
+use open qw(:std utf8);
+
+use Getopt::Long qw(:config auto_help gnu_compat);
+use Pod::Usage;
+
my $verbose = 0;
my @skip_patterns = ();
GetOptions(
           'skip=s' => \@skip_patterns,
           'verbose|v:1' => \$verbose,
          ) or die;

# All test suites = executable files with a .datax file.
my @suites = ();
for my $data_file (glob 'test_suite_*.datax') {
    (my $base = $data_file) =~ s/\.datax$//;
    push @suites, $base if -x $base;
    push @suites, "$base.exe" if -e "$base.exe"; # Windows builds
}
die "$0: no test suite found\n" unless @suites;
+
+# "foo" as a skip pattern skips "test_suite_foo" and "test_suite_foo.bar"
+# but not "test_suite_foobar".
+my $skip_re =
+ ( '\Atest_suite_(' .
+ join('|', map {
+ s/[ ,;]/|/g; # allow any of " ,;|" as separators
+ s/\./\./g; # "." in the input means ".", not "any character"
+ $_
+ } @skip_patterns) .
+ ')(\z|\.)' );
+
# in case test suites are linked dynamically
$ENV{'LD_LIBRARY_PATH'} = '../library';
$ENV{'DYLD_LIBRARY_PATH'} = '../library';

# On Windows the executables are run directly; elsewhere prefix with "./".
my $prefix = $^O eq "MSWin32" ? '' : './';

# Accumulators for the per-suite and overall summaries printed at the end.
my (@failed_suites, $total_tests_run, $failed, $suite_cases_passed,
    $suite_cases_failed, $suite_cases_skipped, $total_cases_passed,
    $total_cases_failed, $total_cases_skipped );
my $suites_skipped = 0;
+
# Print $string centered on a $width-column line, padded with $padchar.
sub pad_print_center {
    my( $width, $padchar, $string ) = @_;
    my $padlen = ( $width - length( $string ) - 2 ) / 2;
    print $padchar x( $padlen ), " $string ", $padchar x( $padlen ), "\n";
}
+
for my $suite (@suites)
{
    # Print the suite name padded with dots, leaving room for PASS/FAIL/SKIP.
    print "$suite ", "." x ( 72 - length($suite) - 2 - 4 ), " ";
    if( $suite =~ /$skip_re/o ) {
        print "SKIP\n";
        ++$suites_skipped;
        next;
    }

    my $command = "$prefix$suite";
    if( $verbose ) {
        $command .= ' -v';
    }
    my $result = `$command`;

    # Count per-case result markers in the captured output; the "() ="
    # idiom counts the matches of a global pattern in list context.
    $suite_cases_passed = () = $result =~ /.. PASS/g;
    $suite_cases_failed = () = $result =~ /.. FAILED/g;
    $suite_cases_skipped = () = $result =~ /.. ----/g;

    # $? holds the exit status of the backtick command above.
    if( $? == 0 ) {
        print "PASS\n";
        if( $verbose > 2 ) {
            pad_print_center( 72, '-', "Begin $suite" );
            print $result;
            pad_print_center( 72, '-', "End $suite" );
        }
    } else {
        push @failed_suites, $suite;
        print "FAIL\n";
        if( $verbose ) {
            pad_print_center( 72, '-', "Begin $suite" );
            print $result;
            pad_print_center( 72, '-', "End $suite" );
        }
    }

    # Parse the suite's own summary line for executed/skipped counts.
    my ($passed, $tests, $skipped) = $result =~ /([0-9]*) \/ ([0-9]*) tests.*?([0-9]*) skipped/;
    $total_tests_run += $tests - $skipped;

    if( $verbose > 1 ) {
        print "(test cases passed:", $suite_cases_passed,
              " failed:", $suite_cases_failed,
              " skipped:", $suite_cases_skipped,
              " of total:", ($suite_cases_passed + $suite_cases_failed +
                             $suite_cases_skipped),
              ")\n"
    }

    $total_cases_passed += $suite_cases_passed;
    $total_cases_failed += $suite_cases_failed;
    $total_cases_skipped += $suite_cases_skipped;
}
+
+print "-" x 72, "\n";
+print @failed_suites ? "FAILED" : "PASSED";
+printf( " (%d suites, %d tests run%s)\n",
+ scalar(@suites) - $suites_skipped,
+ $total_tests_run,
+ $suites_skipped ? ", $suites_skipped suites skipped" : "" );
+
+if( $verbose && @failed_suites ) {
+ # the output can be very long, so provide a summary of which suites failed
+ print " failed suites : @failed_suites\n";
+}
+
+if( $verbose > 1 ) {
+ print " test cases passed :", $total_cases_passed, "\n";
+ print " failed :", $total_cases_failed, "\n";
+ print " skipped :", $total_cases_skipped, "\n";
+ print " of tests executed :", ( $total_cases_passed + $total_cases_failed ),
+ "\n";
+ print " of available tests :",
+ ( $total_cases_passed + $total_cases_failed + $total_cases_skipped ),
+ "\n";
+ if( $suites_skipped != 0 ) {
+ print "Note: $suites_skipped suites were skipped.\n";
+ }
+}
+
+exit( @failed_suites ? 1 : 0 );
+
diff --git a/tests/scripts/run_demos.py b/tests/scripts/run_demos.py
new file mode 100755
index 0000000..6a63d23
--- /dev/null
+++ b/tests/scripts/run_demos.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+"""Run the Mbed TLS demo scripts.
+"""
+import argparse
+import glob
+import subprocess
+import sys
+
def run_demo(demo, quiet=False):
    """Run one demo script and report whether it exited successfully.

    demo is the path of an executable script. When quiet is true, the
    demo's stdout and stderr are discarded. Return True if the demo
    exits with status 0, False otherwise.
    """
    extra_kwargs = (
        {'stdout': subprocess.DEVNULL, 'stderr': subprocess.DEVNULL}
        if quiet else {}
    )
    status = subprocess.call([demo], **extra_kwargs)
    return status == 0
+
def run_demos(demos, quiet=False):
    """Run the specified demos and print summary information about failures.

    Return True if all demos passed and False if a demo fails.
    """
    failures = []
    for demo in demos:
        if not quiet:
            print('#### {} ####'.format(demo))
        passed = run_demo(demo, quiet=quiet)
        if not passed:
            failures.append(demo)
            if not quiet:
                print('{}: FAIL'.format(demo))
        # In quiet mode, print one verdict line per demo; otherwise just
        # leave a blank line between demos.
        if quiet:
            verdict = 'PASS' if passed else 'FAIL'
            print('{}: {}'.format(demo, verdict))
        else:
            print('')
    pass_count = len(demos) - len(failures)
    print('{}/{} demos passed'.format(pass_count, len(demos)))
    if failures and not quiet:
        print('Failures:', *failures)
    return not failures
+
def run_all_demos(quiet=False):
    """Run all the available demos.

    Return True if all demos passed and False if a demo fails.
    """
    found = glob.glob('programs/*/*_demo.sh')
    if not found:
        # Keep the message on one line. pylint: disable=line-too-long
        raise Exception('No demos found. run_demos needs to operate from the Mbed TLS toplevel directory.')
    return run_demos(found, quiet=quiet)
+
def main():
    """Parse command-line options, run every demo, and exit 0 on success."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--quiet', '-q',
                        action='store_true',
                        help="suppress the output of demos")
    options = parser.parse_args()
    all_passed = run_all_demos(quiet=options.quiet)
    sys.exit(0 if all_passed else 1)

if __name__ == '__main__':
    main()
diff --git a/tests/scripts/scripts_path.py b/tests/scripts/scripts_path.py
new file mode 100644
index 0000000..5d83f29
--- /dev/null
+++ b/tests/scripts/scripts_path.py
@@ -0,0 +1,17 @@
+"""Add our Python library directory to the module search path.
+
+Usage:
+
+ import scripts_path # pylint: disable=unused-import
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+
+import os
+import sys
+
# This file lives in tests/scripts/; the shared Python library is two
# directories up, in <repo root>/scripts/.
sys.path.append(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'scripts'))
diff --git a/tests/scripts/set_psa_test_dependencies.py b/tests/scripts/set_psa_test_dependencies.py
new file mode 100755
index 0000000..f68dfcb
--- /dev/null
+++ b/tests/scripts/set_psa_test_dependencies.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+
+"""Edit test cases to use PSA dependencies instead of classic dependencies.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import os
+import re
+import sys
+
# Legacy (non-PSA) compile-time options that PSA test cases must not list
# as dependencies; is_classic_dependency() checks membership (with or
# without a leading '!').
CLASSIC_DEPENDENCIES = frozenset([
    # This list is manually filtered from mbedtls_config.h.

    # Mbed TLS feature support.
    # Only features that affect what can be done are listed here.
    # Options that control optimizations or alternative implementations
    # are omitted.
    'MBEDTLS_CIPHER_MODE_CBC',
    'MBEDTLS_CIPHER_MODE_CFB',
    'MBEDTLS_CIPHER_MODE_CTR',
    'MBEDTLS_CIPHER_MODE_OFB',
    'MBEDTLS_CIPHER_MODE_XTS',
    'MBEDTLS_CIPHER_NULL_CIPHER',
    'MBEDTLS_CIPHER_PADDING_PKCS7',
    'MBEDTLS_CIPHER_PADDING_ONE_AND_ZEROS',
    'MBEDTLS_CIPHER_PADDING_ZEROS_AND_LEN',
    'MBEDTLS_CIPHER_PADDING_ZEROS',
    # Curve options are deliberately kept (commented out below) because
    # curve dependencies are not converted systematically.
    #curve#'MBEDTLS_ECP_DP_SECP192R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP224R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP256R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP384R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP521R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP192K1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP224K1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_SECP256K1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_BP256R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_BP384R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_BP512R1_ENABLED',
    #curve#'MBEDTLS_ECP_DP_CURVE25519_ENABLED',
    #curve#'MBEDTLS_ECP_DP_CURVE448_ENABLED',
    'MBEDTLS_ECDSA_DETERMINISTIC',
    #'MBEDTLS_GENPRIME', #needed for RSA key generation
    'MBEDTLS_PKCS1_V15',
    'MBEDTLS_PKCS1_V21',

    # Mbed TLS modules.
    # Only modules that provide cryptographic mechanisms are listed here.
    # Platform, data formatting, X.509 or TLS modules are omitted.
    'MBEDTLS_AES_C',
    'MBEDTLS_BIGNUM_C',
    'MBEDTLS_CAMELLIA_C',
    'MBEDTLS_ARIA_C',
    'MBEDTLS_CCM_C',
    'MBEDTLS_CHACHA20_C',
    'MBEDTLS_CHACHAPOLY_C',
    'MBEDTLS_CMAC_C',
    'MBEDTLS_CTR_DRBG_C',
    'MBEDTLS_DES_C',
    'MBEDTLS_DHM_C',
    'MBEDTLS_ECDH_C',
    'MBEDTLS_ECDSA_C',
    'MBEDTLS_ECJPAKE_C',
    'MBEDTLS_ECP_C',
    'MBEDTLS_ENTROPY_C',
    'MBEDTLS_GCM_C',
    'MBEDTLS_HKDF_C',
    'MBEDTLS_HMAC_DRBG_C',
    'MBEDTLS_NIST_KW_C',
    'MBEDTLS_MD5_C',
    'MBEDTLS_PKCS5_C',
    'MBEDTLS_PKCS12_C',
    'MBEDTLS_POLY1305_C',
    'MBEDTLS_RIPEMD160_C',
    'MBEDTLS_RSA_C',
    'MBEDTLS_SHA1_C',
    'MBEDTLS_SHA256_C',
    'MBEDTLS_SHA512_C',
])
+
def is_classic_dependency(dep):
    """Whether dep is a classic dependency that PSA test cases should not use."""
    # A leading '!' negates a dependency; strip it before the lookup.
    stripped = dep[1:] if dep.startswith('!') else dep
    return stripped in CLASSIC_DEPENDENCIES
+
def is_systematic_dependency(dep):
    """Whether dep is a PSA dependency which is determined systematically."""
    # Curve (PSA_WANT_ECC_xxx) dependencies are not handled systematically.
    return dep.startswith('PSA_WANT_') and not dep.startswith('PSA_WANT_ECC_')
+
# Symbols that look like mechanisms but deliberately have no systematic
# PSA_WANT_xxx dependency.
WITHOUT_SYSTEMATIC_DEPENDENCIES = frozenset([
    'PSA_ALG_AEAD_WITH_SHORTENED_TAG', # only a modifier
    'PSA_ALG_ANY_HASH', # only meaningful in policies
    'PSA_ALG_KEY_AGREEMENT', # only a way to combine algorithms
    'PSA_ALG_TRUNCATED_MAC', # only a modifier
    'PSA_KEY_TYPE_NONE', # not a real key type
    'PSA_KEY_TYPE_DERIVE', # always supported, don't list it to reduce noise
    'PSA_KEY_TYPE_RAW_DATA', # always supported, don't list it to reduce noise
    'PSA_ALG_AT_LEAST_THIS_LENGTH_MAC', #only a modifier
    'PSA_ALG_AEAD_WITH_AT_LEAST_THIS_LENGTH_TAG', #only a modifier
])

# Symbols whose dependency is not simply "insert _WANT_ after PSA_":
# maps symbol -> frozenset of dependencies.
SPECIAL_SYSTEMATIC_DEPENDENCIES = {
    'PSA_ALG_ECDSA_ANY': frozenset(['PSA_WANT_ALG_ECDSA']),
    'PSA_ALG_RSA_PKCS1V15_SIGN_RAW': frozenset(['PSA_WANT_ALG_RSA_PKCS1V15_SIGN']),
}
+
def dependencies_of_symbol(symbol):
    """Return the dependencies for a symbol that designates a cryptographic mechanism."""
    if symbol in WITHOUT_SYSTEMATIC_DEPENDENCIES:
        return frozenset()
    special = SPECIAL_SYSTEMATIC_DEPENDENCIES.get(symbol)
    if special is not None:
        return special
    if symbol.startswith(('PSA_ALG_CATEGORY_', 'PSA_KEY_TYPE_CATEGORY_')):
        # Categories are used in test data when an unsupported but plausible
        # mechanism number is needed. They have no associated dependency.
        return frozenset()
    # PSA_ALG_xxx -> PSA_WANT_ALG_xxx, PSA_KEY_TYPE_xxx -> PSA_WANT_KEY_TYPE_xxx.
    return {symbol.replace('_', '_WANT_', 1)}
+
def systematic_dependencies(file_name, function_name, arguments):
    """List the systematically determined dependency for a test case.

    Note: may blank out entries of `arguments` in place so that they are
    excluded from the dependency scan below.
    """
    wanted = set()

    # Run key policy negative tests even if the algorithm to attempt
    # performing is not supported, except when the test checks an
    # incompatibility between a requested algorithm for a cryptographic
    # operation and a key policy. In the latter case, we want to filter out
    # the cases where PSA_ERROR_NOT_SUPPORTED is returned instead of
    # PSA_ERROR_NOT_PERMITTED.
    if (function_name.endswith('_key_policy')
            and arguments[-1].startswith('PSA_ERROR_')
            and arguments[-1] != 'PSA_ERROR_NOT_PERMITTED'):
        arguments[-2] = ''
    if (function_name == 'copy_fail'
            and arguments[-1].startswith('PSA_ERROR_')):
        arguments[-2] = ''
        arguments[-3] = ''

    # Storage format tests that only look at how the file is structured and
    # don't care about the format of the key material don't depend on any
    # cryptographic mechanisms.
    if (os.path.basename(file_name) == 'test_suite_psa_crypto_persistent_key.data'
            and function_name in {'format_storage_data_check',
                                  'parse_storage_data_check'}):
        return []

    # Collect the dependencies of every PSA_ALG_xxx / PSA_KEY_TYPE_xxx
    # constant mentioned in the (remaining) arguments.
    for arg in arguments:
        for symbol in re.findall(r'PSA_(?:ALG|KEY_TYPE)_\w+', arg):
            wanted |= dependencies_of_symbol(symbol)
    return sorted(wanted)
+
def updated_dependencies(file_name, function_name, arguments, dependencies):
    """Rework the list of dependencies into PSA_WANT_xxx.

    Remove classic crypto dependencies such as MBEDTLS_RSA_C,
    MBEDTLS_PKCS1_V15, etc.

    Add systematic PSA_WANT_xxx dependencies based on the called function and
    its arguments, replacing existing PSA_WANT_xxx dependencies.
    """
    automatic = systematic_dependencies(file_name, function_name, arguments)
    # Keep only hand-written dependencies that are neither systematic
    # nor classic-crypto options.
    kept = [dep for dep in dependencies
            if not is_systematic_dependency(dep)
            and not is_classic_dependency(dep)]
    return automatic + kept
+
def keep_manual_dependencies(file_name, function_name, arguments):
    #pylint: disable=unused-argument
    """Declare test functions with unusual dependencies here."""
    # If there are no arguments, we can't do any useful work. Assume that if
    # there are dependencies, they are warranted.
    if not arguments:
        return True
    # When PSA_ERROR_NOT_SUPPORTED is expected, usually, at least one of the
    # constants mentioned in the test should not be supported. It isn't
    # possible to determine which one in a systematic way. So let the
    # programmer decide.
    return arguments[-1] == 'PSA_ERROR_NOT_SUPPORTED'
+
def process_data_stanza(stanza, file_name, test_case_number):
    """Update PSA crypto dependencies in one Mbed TLS test case.

    stanza is the test case text (including the description, the dependencies,
    the line with the function and arguments, and optionally comments). Return
    a new stanza with an updated dependency line, preserving everything else
    (description, comments, arguments, etc.).
    """
    if not stanza.lstrip('\n'):
        # Just blank lines
        return stanza
    # Expect 2 or 3 non-comment lines: description, optional dependencies,
    # function-and-arguments.
    content_matches = list(re.finditer(r'^[\t ]*([^\t #].*)$', stanza, re.M))
    if len(content_matches) < 2:
        raise Exception('Not enough content lines in paragraph {} in {}'
                        .format(test_case_number, file_name))
    if len(content_matches) > 3:
        raise Exception('Too many content lines in paragraph {} in {}'
                        .format(test_case_number, file_name))
    # The last content line has the form "function_name:arg1:arg2:...".
    arguments = content_matches[-1].group(0).split(':')
    function_name = arguments.pop(0)
    if keep_manual_dependencies(file_name, function_name, arguments):
        return stanza
    if len(content_matches) == 2:
        # Insert a line for the dependencies. If it turns out that there are
        # no dependencies, we'll remove that empty line below.
        dependencies_location = content_matches[-1].start()
        text_before = stanza[:dependencies_location]
        text_after = '\n' + stanza[dependencies_location:]
        old_dependencies = []
        dependencies_leader = 'depends_on:'
    else:
        # The next-to-last content line must be the existing dependency line.
        dependencies_match = content_matches[-2]
        text_before = stanza[:dependencies_match.start()]
        text_after = stanza[dependencies_match.end():]
        old_dependencies = dependencies_match.group(0).split(':')
        dependencies_leader = old_dependencies.pop(0) + ':'
        if dependencies_leader != 'depends_on:':
            raise Exception('Next-to-last line does not start with "depends_on:"'
                            ' in paragraph {} in {}'
                            .format(test_case_number, file_name))
    new_dependencies = updated_dependencies(file_name, function_name, arguments,
                                            old_dependencies)
    if new_dependencies:
        stanza = (text_before +
                  dependencies_leader + ':'.join(new_dependencies) +
                  text_after)
    else:
        # The dependencies have become empty. Remove the depends_on: line.
        assert text_after[0] == '\n'
        stanza = text_before + text_after[1:]
    return stanza
+
def process_data_file(file_name, old_content):
    """Update PSA crypto dependencies in an Mbed TLS test suite data file.

    Process old_content (the old content of the file) and return the new content.
    """
    new_stanzas = []
    # Stanzas (test cases) are separated by blank lines; number them from 1
    # for error reporting.
    for number, stanza in enumerate(old_content.split('\n\n'), start=1):
        new_stanzas.append(process_data_stanza(stanza, file_name, number))
    return '\n\n'.join(new_stanzas)
+
def update_file(file_name, old_content, new_content):
    """Update the given file with the given new content.

    Replace the existing file. The previous version is renamed to *.bak.
    Don't modify the file if the content was unchanged.
    """
    if new_content == old_content:
        # Nothing changed: leave the file (and its timestamp) alone.
        return
    # Write a temporary file first so the original is never left
    # half-written, then swap the files into place.
    tmp_name = file_name + '.tmp'
    with open(tmp_name, 'w', encoding='utf-8') as tmp_file:
        tmp_file.write(new_content)
    os.replace(file_name, file_name + '.bak')
    os.replace(tmp_name, file_name)
+
def process_file(file_name):
    """Update PSA crypto dependencies in an Mbed TLS test suite data file.

    Replace the existing file. The previous version is renamed to *.bak.
    Don't modify the file if the content was unchanged.
    """
    # Use a context manager so the handle is closed deterministically
    # (the previous code left the file object to the garbage collector).
    with open(file_name, encoding='utf-8') as input_file:
        old_content = input_file.read()
    if file_name.endswith('.data'):
        new_content = process_data_file(file_name, old_content)
    else:
        raise Exception('File type not recognized: {}'
                        .format(file_name))
    update_file(file_name, old_content, new_content)
+
def main(args):
    """Process each test data file named in args."""
    for file_name in args:
        process_file(file_name)

if __name__ == '__main__':
    main(sys.argv[1:])
diff --git a/tests/scripts/tcp_client.pl b/tests/scripts/tcp_client.pl
new file mode 100755
index 0000000..9aff22d
--- /dev/null
+++ b/tests/scripts/tcp_client.pl
@@ -0,0 +1,89 @@
+#!/usr/bin/env perl
+
+# A simple TCP client that sends some data and expects a response.
+# Usage: tcp_client.pl HOSTNAME PORT DATA1 RESPONSE1
+# DATA: hex-encoded data to send to the server
+# RESPONSE: regexp that must match the server's response
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+use warnings;
+use strict;
+use IO::Socket::INET;
+
# Pack hex digits into a binary string, ignoring whitespace.
sub parse_hex {
    my ($hex) = @_;
    # Copy, strip all whitespace, then turn hex digit pairs into bytes.
    (my $compact = $hex) =~ s/\s+//g;
    return pack('H*', $compact);
}
+
## Open a TCP connection to the specified host and port.
## Dies if the connection cannot be established within the timeout.
sub open_connection {
    my ($host, $port) = @_;
    my $socket = IO::Socket::INET->new(Proto => 'tcp',
                                       Timeout => 1,
                                       PeerAddr => $host,
                                       PeerPort => $port)
        or die "Cannot connect to $host:$port: $!";
    return $socket;
}
+
## Close the TCP connection.
sub close_connection {
    my ($conn) = @_;
    # Shut down both directions of the socket.
    # Ignore shutdown failures (at least for now)
    $conn->shutdown(2);
    return 1;
}
+
## Write the given data, expressed as hexadecimal.
## Dies if a send fails; loops until the whole payload has been sent.
sub write_data {
    my ($connection, $hexdata) = @_;
    my $data = parse_hex($hexdata);
    my $total_sent = 0;
    while ($total_sent < length($data)) {
        # Send only the not-yet-sent tail. The previous code passed the
        # whole of $data on every iteration, so a partial send would have
        # retransmitted already-sent bytes from the beginning.
        my $sent = $connection->send(substr($data, $total_sent), 0);
        if (!defined $sent) {
            die "Unable to send data: $!";
        }
        $total_sent += $sent;
    }
    return 1;
}
+
## Read a response and check it against an expected prefix.
## Returns 1 if the received bytes match the expected data, 0 on mismatch
## (after printing a diagnostic to stderr). Dies on a recv error.
sub read_response {
    my ($connection, $expected_hex) = @_;
    my $expected_data = parse_hex($expected_hex);
    my $start_offset = 0;
    while ($start_offset < length($expected_data)) {
        my $actual_data;
        my $ok = $connection->recv($actual_data, length($expected_data));
        # NOTE(review): if the peer closes the connection early, recv()
        # can return an empty string, which would loop forever here —
        # confirm the callers only talk to well-behaved servers.
        if (!defined $ok) {
            die "Unable to receive data: $!";
        }
        # XOR the received chunk against the corresponding slice of the
        # expected data: any nonzero byte marks a mismatch, and $-[0] is
        # the offset of the first mismatching byte within the chunk.
        if (($actual_data ^ substr($expected_data, $start_offset)) =~ /[^\000]/) {
            printf STDERR ("Received \\x%02x instead of \\x%02x at offset %d\n",
                           ord(substr($actual_data, $-[0], 1)),
                           ord(substr($expected_data, $start_offset + $-[0], 1)),
                           $start_offset + $-[0]);
            return 0;
        }
        $start_offset += length($actual_data);
    }
    return 1;
}
+
# Command line: HOSTNAME PORT DATA1 RESPONSE1 (data and response in hex).
if (@ARGV != 4) {
    print STDERR "Usage: $0 HOSTNAME PORT DATA1 RESPONSE1\n";
    exit(3);
}
my ($host, $port, $data1, $response1) = @ARGV;
my $connection = open_connection($host, $port);
write_data($connection, $data1);
# Exit 1 if the server's response does not match the expected prefix.
if (!read_response($connection, $response1)) {
    exit(1);
}
close_connection($connection);
diff --git a/tests/scripts/test-ref-configs.pl b/tests/scripts/test-ref-configs.pl
new file mode 100755
index 0000000..055023a
--- /dev/null
+++ b/tests/scripts/test-ref-configs.pl
@@ -0,0 +1,161 @@
+#!/usr/bin/env perl
+
+# test-ref-configs.pl
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# For each reference configuration file in the configs directory, build the
+# configuration, run the test suites and compat.sh
+#
+# Usage: tests/scripts/test-ref-configs.pl [config-name [...]]
+
+use warnings;
+use strict;
+
# Table of reference configurations to exercise. Per-configuration keys:
#   compat: extra arguments for tests/compat.sh; compat.sh is skipped
#           if absent.
#   opt: extra arguments for tests/ssl-opt.sh; ssl-opt.sh is skipped if
#        absent (' ' means run with no extra filter).
#   opt_needs_debug: rebuild with MBEDTLS_DEBUG_C and MBEDTLS_ERROR_C
#        before running ssl-opt.sh.
#   test_again_with_use_psa: also run the whole test with
#        MBEDTLS_USE_PSA_CRYPTO enabled.
my %configs = (
    'config-ccm-psk-tls1_2.h' => {
        'compat' => '-m tls12 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
        'test_again_with_use_psa' => 1
    },
    'config-ccm-psk-dtls1_2.h' => {
        'compat' => '-m dtls12 -f \'^TLS-PSK-WITH-AES-...-CCM-8\'',
        'opt' => ' ',
        'opt_needs_debug' => 1,
        'test_again_with_use_psa' => 1
    },
    'config-no-entropy.h' => {
    },
    'config-suite-b.h' => {
        'compat' => "-m tls12 -f 'ECDHE-ECDSA.*AES.*GCM' -p mbedTLS",
        'test_again_with_use_psa' => 1,
        'opt' => ' ',
        'opt_needs_debug' => 1,
    },
    'config-symmetric-only.h' => {
        'test_again_with_use_psa' => 0, # Uses PSA by default, no need to test it twice
    },
    'config-tfm.h' => {
        'test_again_with_use_psa' => 0, # Uses PSA by default, no need to test it twice
    },
    'config-thread.h' => {
        'opt' => '-f ECJPAKE.*nolog',
        'test_again_with_use_psa' => 1,
    },
);
+
# If no config-name is provided, use all known configs.
# Otherwise, use the provided names only.
my @configs_to_test = sort keys %configs;
if (@ARGV) {
    # Reject unknown configuration names before doing any work.
    foreach my $conf_name (@ARGV) {
        exists $configs{$conf_name}
            or die "Unknown configuration: $conf_name\n";
    }
    @configs_to_test = @ARGV;
}

# Sanity check: this script must run from the Mbed TLS root directory.
-d 'library' && -d 'include' && -d 'tests' or die "Must be run from root\n";

my $config_h = 'include/mbedtls/mbedtls_config.h';
+
+system( "cp $config_h $config_h.bak" ) and die;
+sub abort {
+ system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
+ # use an exit code between 1 and 124 for git bisect (die returns 255)
+ warn $_[0];
+ exit 1;
+}
+
# Create a seedfile for configurations that enable MBEDTLS_ENTROPY_NV_SEED.
# For test purposes, this doesn't have to be cryptographically random.
# (Re)create the file if it is missing or shorter than 64 bytes.
if (!-e "tests/seedfile" || -s "tests/seedfile" < 64) {
    local *SEEDFILE;
    open SEEDFILE, ">tests/seedfile" or die;
    print SEEDFILE "*" x 64 or die;
    close SEEDFILE or die;
}
+
# Build and test one reference configuration.
# Arguments: config file name (under configs/), its %configs entry,
# and a flag requesting an extra pass with MBEDTLS_USE_PSA_CRYPTO.
# Uses abort() (which restores the config) on any failure.
sub perform_test {
    my $conf_file = $_[0];
    my $data = $_[1];
    my $test_with_psa = $_[2];

    my $conf_name = $conf_file;
    if ( $test_with_psa )
    {
        $conf_name .= "+PSA";
    }

    # Start from the pristine configuration and a clean build tree.
    system( "cp $config_h.bak $config_h" ) and die;
    system( "make clean" ) and die;

    print "\n******************************************\n";
    print "* Testing configuration: $conf_name\n";
    print "******************************************\n";

    # Let the test scripts report which configuration they ran under.
    $ENV{MBEDTLS_TEST_CONFIGURATION} = $conf_name;

    system( "cp configs/$conf_file $config_h" )
        and abort "Failed to activate $conf_file\n";

    if ( $test_with_psa )
    {
        system( "scripts/config.py set MBEDTLS_PSA_CRYPTO_C" );
        system( "scripts/config.py set MBEDTLS_USE_PSA_CRYPTO" );
    }

    system( "CFLAGS='-Os -Werror -Wall -Wextra' make" ) and abort "Failed to build: $conf_name\n";
    system( "make test" ) and abort "Failed test suite: $conf_name\n";

    # Run compat.sh only for configurations that declare a 'compat' filter.
    my $compat = $data->{'compat'};
    if( $compat )
    {
        print "\nrunning compat.sh $compat ($conf_name)\n";
        system( "tests/compat.sh $compat" )
            and abort "Failed compat.sh: $conf_name\n";
    }
    else
    {
        print "\nskipping compat.sh ($conf_name)\n";
    }

    # Run ssl-opt.sh only for configurations that declare an 'opt' filter.
    my $opt = $data->{'opt'};
    if( $opt )
    {
        if( $data->{'opt_needs_debug'} )
        {
            # ssl-opt.sh needs debug traces: rebuild with debug options.
            print "\nrebuilding with debug traces for ssl-opt ($conf_name)\n";
            $conf_name .= '+DEBUG';
            $ENV{MBEDTLS_TEST_CONFIGURATION} = $conf_name;
            system( "make clean" );
            system( "scripts/config.py set MBEDTLS_DEBUG_C" );
            system( "scripts/config.py set MBEDTLS_ERROR_C" );
            system( "CFLAGS='-Os -Werror -Wall -Wextra' make" ) and abort "Failed to build: $conf_name\n";
        }

        print "\nrunning ssl-opt.sh $opt ($conf_name)\n";
        system( "tests/ssl-opt.sh $opt" )
            and abort "Failed ssl-opt.sh: $conf_name\n";
    }
    else
    {
        print "\nskipping ssl-opt.sh ($conf_name)\n";
    }
}
+
# Test each selected configuration; when requested, test it a second time
# with MBEDTLS_USE_PSA_CRYPTO enabled.
foreach my $conf ( @configs_to_test ) {
    my $test_with_psa = $configs{$conf}{'test_again_with_use_psa'};
    if ( $test_with_psa )
    {
        perform_test( $conf, $configs{$conf}, $test_with_psa );
    }
    perform_test( $conf, $configs{$conf}, 0 );
}

# Restore the original configuration and leave a clean tree behind.
system( "mv $config_h.bak $config_h" ) and warn "$config_h not restored\n";
system( "make clean" );
exit 0;
diff --git a/tests/scripts/test_config_script.py b/tests/scripts/test_config_script.py
new file mode 100755
index 0000000..e500b33
--- /dev/null
+++ b/tests/scripts/test_config_script.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+
+"""Test helper for the Mbed TLS configuration file tool
+
+Run config.py with various parameters and write the results to files.
+
+This is a harness to help regression testing, not a functional tester.
+Sample usage:
+
+ test_config_script.py -d old
+ ## Modify config.py and/or mbedtls_config.h ##
+ test_config_script.py -d new
+ diff -ru old new
+"""
+
+## Copyright The Mbed TLS Contributors
+## SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+##
+
+import argparse
+import glob
+import os
+import re
+import shutil
+import subprocess
+
# All files written by this harness share this prefix so they can be
# recognized (and potentially cleaned up) later.
OUTPUT_FILE_PREFIX = 'config-'

def output_file_name(directory, stem, extension):
    """Return the path of an output file built from stem and extension."""
    basename = '{}{}.{}'.format(OUTPUT_FILE_PREFIX, stem, extension)
    return os.path.join(directory, basename)
+
def cleanup_directory(directory):
    """Remove old output files."""
    # NOTE(review): the extension list is empty, so this loop currently
    # removes nothing. Possibly a deliberate disable, possibly a leftover —
    # confirm whether stale config-*.h/.out/.err/.status files should be
    # removed here.
    for extension in []:
        pattern = output_file_name(directory, '*', extension)
        filenames = glob.glob(pattern)
        for filename in filenames:
            os.remove(filename)
+
def prepare_directory(directory):
    """Create the output directory if it doesn't exist yet.

    If there are old output files, remove them.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)
    else:
        cleanup_directory(directory)
+
def guess_presets_from_help(help_text):
    """Figure out what presets the script supports.

    help_text should be the output from running the script with --help.
    """
    # config.py's help shows the choices as "{get,set,unset,preset1,...}":
    # the presets are whatever remains once the get/set/unset commands
    # are taken out.
    for hit in re.findall(r'\{([-\w,]+)\}', help_text):
        words = set(hit.split(','))
        if words.issuperset({'get', 'set', 'unset'}):
            return words - {'get', 'set', 'unset'}
    # config.pl lists presets as indented "name  - description" lines.
    legacy_hits = re.findall(r'\n +([-\w]+) +- ', help_text)
    if legacy_hits:
        return legacy_hits
    raise Exception("Unable to figure out supported presets. Pass the '-p' option.")
+
def list_presets(options):
    """Return the list of presets to test.

    The list is taken from the command line if present, otherwise it is
    extracted from running the config script with --help.
    """
    if options.presets:
        # Accept comma- and/or space-separated lists.
        return re.split(r'[ ,]+', options.presets)
    proc = subprocess.run([options.script, '--help'],
                          check=False, # config.pl --help returns 255
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
    return guess_presets_from_help(proc.stdout.decode('ascii'))
+
def run_one(options, args, stem_prefix='', input_file=None):
    """Run the config script with the given arguments.

    Take the original content from input_file if specified, defaulting
    to options.input_file if input_file is None.

    Write the following files, where xxx contains stem_prefix followed by
    a filename-friendly encoding of args:
    * config-xxx.h: modified file.
    * config-xxx.out: standard output.
    * config-xxx.err: standard error.
    * config-xxx.status: exit code.

    Return ("xxx+", "path/to/config-xxx.h") which can be used as
    stem_prefix and input_file to call this function again with new args.
    """
    if input_file is None:
        input_file = options.input_file
    stem = stem_prefix + '-'.join(args)
    # Four output files per invocation: data, stdout, stderr, exit status.
    data_filename = output_file_name(options.output_directory, stem, 'h')
    stdout_filename = output_file_name(options.output_directory, stem, 'out')
    stderr_filename = output_file_name(options.output_directory, stem, 'err')
    status_filename = output_file_name(options.output_directory, stem, 'status')
    # The script edits its input in place, so work on a copy.
    shutil.copy(input_file, data_filename)
    # Pass only the file basename, not the full path, to avoid getting the
    # directory name in error messages, which would make comparisons
    # between output directories more difficult.
    cmd = [os.path.abspath(options.script),
           '-f', os.path.basename(data_filename)]
    with open(stdout_filename, 'wb') as out:
        with open(stderr_filename, 'wb') as err:
            status = subprocess.call(cmd + args,
                                     cwd=options.output_directory,
                                     stdin=subprocess.DEVNULL,
                                     stdout=out, stderr=err)
    with open(status_filename, 'w') as status_file:
        status_file.write('{}\n'.format(status))
    return stem + "+", data_filename
+
### A list of symbols to test with.
### This script currently tests what happens when you change a symbol from
### having a value to not having a value or vice versa. This is not
### necessarily useful behavior, and we may not consider it a bug if
### config.py stops handling that case correctly.
### Each entry's comment records why it was chosen (its state in the
### default configuration).
TEST_SYMBOLS = [
    'CUSTOM_SYMBOL', # does not exist
    'MBEDTLS_AES_C', # set, no value
    'MBEDTLS_MPI_MAX_SIZE', # unset, has a value
    'MBEDTLS_NO_UDBL_DIVISION', # unset, in "System support"
    'MBEDTLS_PLATFORM_ZEROIZE_ALT', # unset, in "Customisation configuration options"
]
+
def run_all(options):
    """Run all the command lines to test."""
    # One run per preset, then a battery of get/set/unset combinations
    # per symbol of interest. The order of calls is significant: some
    # runs feed on the output file of the preceding 'set'.
    for preset in list_presets(options):
        run_one(options, [preset])
    for symbol in TEST_SYMBOLS:
        run_one(options, ['get', symbol])
        stem, filename = run_one(options, ['set', symbol])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol])
        stem, filename = run_one(options, ['set', symbol, 'value'])
        run_one(options, ['get', symbol], stem_prefix=stem, input_file=filename)
        run_one(options, ['--force', 'set', symbol, 'value'])
        run_one(options, ['unset', symbol])
+
def main():
    """Command line entry point."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', metavar='DIR',
                        dest='output_directory', required=True,
                        help="""Output directory.""")
    parser.add_argument('-f', metavar='FILE',
                        dest='input_file',
                        default='include/mbedtls/mbedtls_config.h',
                        help="""Config file (default: %(default)s).""")
    parser.add_argument('-p', metavar='PRESET,...',
                        dest='presets',
                        help="""Presets to test (default: guessed from --help).""")
    parser.add_argument('-s', metavar='FILE',
                        dest='script', default='scripts/config.py',
                        help="""Configuration script (default: %(default)s).""")
    opts = parser.parse_args()
    prepare_directory(opts.output_directory)
    run_all(opts)

if __name__ == '__main__':
    main()
diff --git a/tests/scripts/test_generate_test_code.py b/tests/scripts/test_generate_test_code.py
new file mode 100755
index 0000000..abc46a7
--- /dev/null
+++ b/tests/scripts/test_generate_test_code.py
@@ -0,0 +1,1915 @@
+#!/usr/bin/env python3
+# Unit test for generate_test_code.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Unit tests for generate_test_code.py
+"""
+
+from io import StringIO
+from unittest import TestCase, main as unittest_main
+from unittest.mock import patch
+
+from generate_test_code import gen_dependencies, gen_dependencies_one_line
+from generate_test_code import gen_function_wrapper, gen_dispatch
+from generate_test_code import parse_until_pattern, GeneratorInputError
+from generate_test_code import parse_suite_dependencies
+from generate_test_code import parse_function_dependencies
+from generate_test_code import parse_function_arguments, parse_function_code
+from generate_test_code import parse_functions, END_HEADER_REGEX
+from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split
+from generate_test_code import parse_test_data, gen_dep_check
+from generate_test_code import gen_expression_check, write_dependencies
+from generate_test_code import write_parameters, gen_suite_dep_checks
+from generate_test_code import gen_from_test_data
+
+
+class GenDep(TestCase):
+    """
+    Test suite for gen_dependencies().
+    """
+
+    def test_dependencies_list(self):
+        """
+        Test that gen_dependencies() generates nested #if defined()
+        guards, with the #endif lines emitted in reverse order.
+        :return:
+        """
+        dependencies = ['DEP1', 'DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_disabled_dependencies_list(self):
+        """
+        Test that negated dependencies ('!DEP') generate
+        '#if !defined()' guards and matching #endif comments.
+        :return:
+        """
+        dependencies = ['!DEP1', '!DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if !defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* !DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* !DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_mixed_dependencies_list(self):
+        """
+        Test that a mix of plain and negated dependencies generates
+        the matching #if guards and #endif comments.
+        :return:
+        """
+        dependencies = ['!DEP1', 'DEP2']
+        dep_start, dep_end = gen_dependencies(dependencies)
+        preprocessor1, preprocessor2 = dep_start.splitlines()
+        endif1, endif2 = dep_end.splitlines()
+        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(preprocessor2, '#if defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif1, '#endif /* DEP2 */',
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(endif2, '#endif /* !DEP1 */',
+                         'Preprocessor generated incorrectly')
+
+    def test_empty_dependencies_list(self):
+        """
+        Test that an empty dependency list generates empty begin
+        and end strings.
+        :return:
+        """
+        dependencies = []
+        dep_start, dep_end = gen_dependencies(dependencies)
+        self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly')
+        self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly')
+
+    def test_large_dependencies_list(self):
+        """
+        Test that every dependency in a long list gets its own
+        #if line and its own #endif line.
+        :return:
+        """
+        dependencies = []
+        count = 10
+        for i in range(count):
+            dependencies.append('DEP%d' % i)
+        dep_start, dep_end = gen_dependencies(dependencies)
+        self.assertEqual(len(dep_start.splitlines()), count,
+                         'Preprocessor generated incorrectly')
+        self.assertEqual(len(dep_end.splitlines()), count,
+                         'Preprocessor generated incorrectly')
+
+
+class GenDepOneLine(TestCase):
+    """
+    Test Suite for testing gen_dependencies_one_line()
+    """
+
+    def test_dependencies_list(self):
+        """
+        Test that gen_dependencies_one_line() joins all dependencies
+        with && on a single #if line.
+        :return:
+        """
+        dependencies = ['DEP1', 'DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_disabled_dependencies_list(self):
+        """
+        Test that negated dependencies produce !defined() terms on
+        the single #if line.
+        :return:
+        """
+        dependencies = ['!DEP1', '!DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_mixed_dependencies_list(self):
+        """
+        Test that a mix of plain and negated dependencies is joined
+        correctly on a single #if line.
+        :return:
+        """
+        dependencies = ['!DEP1', 'DEP2']
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)',
+                         'Preprocessor generated incorrectly')
+
+    def test_empty_dependencies_list(self):
+        """
+        Test that an empty dependency list produces an empty
+        string.
+        :return:
+        """
+        dependencies = []
+        dep_str = gen_dependencies_one_line(dependencies)
+        self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly')
+
+    def test_large_dependencies_list(self):
+        """
+        Test that every dependency in a long list appears as a
+        defined() term joined by &&.
+        :return:
+        """
+        dependencies = []
+        count = 10
+        for i in range(count):
+            dependencies.append('DEP%d' % i)
+        dep_str = gen_dependencies_one_line(dependencies)
+        expected = '#if ' + ' && '.join(['defined(%s)' %
+                                         x for x in dependencies])
+        self.assertEqual(dep_str, expected,
+                         'Preprocessor generated incorrectly')
+
+
+class GenFunctionWrapper(TestCase):
+    """
+    Test Suite for testing gen_function_wrapper()
+    """
+
+    def test_params_unpack(self):
+        """
+        Test that params are properly unpacked in the function call.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd'))
+        expected = '''
+void test_a_wrapper( void ** params )
+{
+
+    test_a( a, b, c, d );
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_local(self):
+        """
+        Test that local variable declarations are emitted before the call.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a',
+                                    'int x = 1;', ('x', 'b', 'c', 'd'))
+        expected = '''
+void test_a_wrapper( void ** params )
+{
+int x = 1;
+    test_a( x, b, c, d );
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_empty_params(self):
+        """
+        Test the wrapper generated for a function with no parameters.
+
+        :return:
+        """
+        code = gen_function_wrapper('test_a', '', ())
+        expected = '''
+void test_a_wrapper( void ** params )
+{
+    (void)params;
+
+    test_a( );
+}
+'''
+        self.assertEqual(code, expected)
+
+
+class GenDispatch(TestCase):
+    """
+    Test suite for testing gen_dispatch()
+    """
+
+    def test_dispatch(self):
+        """
+        Test that the dispatch table entry is guarded by its dependencies.
+        :return:
+        """
+        code = gen_dispatch('test_a', ['DEP1', 'DEP2'])
+        expected = '''
+#if defined(DEP1) && defined(DEP2)
+    test_a_wrapper,
+#else
+    NULL,
+#endif
+'''
+        self.assertEqual(code, expected)
+
+    def test_empty_dependencies(self):
+        """
+        Test that no guard is emitted for an empty dependency list.
+        :return:
+        """
+        code = gen_dispatch('test_a', [])
+        expected = '''
+    test_a_wrapper,
+'''
+        self.assertEqual(code, expected)
+
+
+class StringIOWrapper(StringIO):
+    """
+    File-like class to mock a file object in tests; counts lines read.
+    """
+    def __init__(self, file_name, data, line_no=0):
+        """
+        Init file handle.
+
+        :param file_name: name reported as the stream's file name
+        :param data: initial content of the stream
+        :param line_no: starting line number (incremented by readline())
+        """
+        super(StringIOWrapper, self).__init__(data)
+        self.line_no = line_no
+        self.name = file_name
+
+    def next(self):
+        """
+        Python 2 style iterator method. Note: this simply delegates to
+        the base class __next__() and does not count lines; line
+        counting is done in readline() only.
+
+        :return: Line read from file.
+        """
+        parent = super(StringIOWrapper, self)
+        line = parent.__next__()
+        return line
+
+    def readline(self, _length=0):
+        """
+        Wrap the base class readline and count the line read.
+
+        :param _length: ignored, kept for signature compatibility
+        :return: line read, with self.line_no incremented
+        """
+        line = super(StringIOWrapper, self).readline()
+        if line is not None:  # NOTE(review): StringIO.readline() returns '' at EOF, never None
+            self.line_no += 1
+        return line
+
+
+class ParseUntilPattern(TestCase):
+    """
+    Test Suite for testing parse_until_pattern().
+    """
+
+    def test_suite_headers(self):
+        """
+        Test that suite headers are parsed correctly.
+
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+        expected = '''#line 1 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data, line_no=0)
+        headers = parse_until_pattern(stream, END_HEADER_REGEX)
+        self.assertEqual(headers, expected)
+
+    def test_line_no(self):
+        """
+        Test that #line is set to correct line no. in source .function file.
+
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+        offset_line_no = 5
+        expected = '''#line %d "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+''' % (offset_line_no + 1)
+        stream = StringIOWrapper('test_suite_ut.function', data,
+                                 offset_line_no)
+        headers = parse_until_pattern(stream, END_HEADER_REGEX)
+        self.assertEqual(headers, expected)
+
+    def test_no_end_header_comment(self):
+        """
+        Test that GeneratorInputError is raised when the end header
+        comment is missing.
+        :return:
+        """
+        data = '''#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_until_pattern, stream,
+                          END_HEADER_REGEX)
+
+
+class ParseSuiteDependencies(TestCase):
+    """
+    Test Suite for testing parse_suite_dependencies().
+    """
+
+    def test_suite_dependencies(self):
+        """
+        Test that suite dependencies are parsed from the comment block.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_end_dep_comment(self):
+        """
+        Test that GeneratorInputError is raised when end dep comment is missing.
+        :return:
+        """
+        data = '''
+* depends_on:MBEDTLS_ECP_C
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_suite_dependencies,
+                          stream)
+
+    def test_dependencies_split(self):
+        """
+        Test that dependencies are split on ':' and whitespace is stripped.
+        :return:
+        """
+        data = '''
+ * depends_on:MBEDTLS_ECP_C:A:B: C : D :F : G: !H
+ * END_DEPENDENCIES
+ */
+'''
+        expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        dependencies = parse_suite_dependencies(stream)
+        self.assertEqual(dependencies, expected)
+
+
+class ParseFuncDependencies(TestCase):
+    """
+    Test Suite for testing parse_function_dependencies()
+    """
+
+    def test_function_dependencies(self):
+        """
+        Test that parse_function_dependencies() parses the dependencies
+        listed on a BEGIN_CASE line.
+        :return:
+        """
+        line = '/* BEGIN_CASE ' \
+               'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
+        expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, expected)
+
+    def test_no_dependencies(self):
+        """
+        Test that parse_function_dependencies() returns an empty list
+        when no dependencies are specified.
+        :return:
+        """
+        line = '/* BEGIN_CASE */'
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, [])
+
+    def test_tolerance(self):
+        """
+        Test that parse_function_dependencies() tolerates whitespace
+        around dependency names.
+        :return:
+        """
+        line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/'
+        dependencies = parse_function_dependencies(line)
+        self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F'])
+
+
+class ParseFuncSignature(TestCase):
+    """
+    Test Suite for parse_function_arguments().
+    """
+
+    def test_int_and_char_params(self):
+        """
+        Test int and char parameter parsing and dispatch generation.
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, int b, int result )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, ['char*', 'int', 'int'])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch,
+                         ['(char *) params[0]',
+                          '((mbedtls_test_argument_t *) params[1])->sint',
+                          '((mbedtls_test_argument_t *) params[2])->sint'])
+
+    def test_hex_params(self):
+        """
+        Test that a data_t parameter generates a local data_t variable.
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, data_t * h, int result )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, ['char*', 'hex', 'int'])
+        self.assertEqual(local,
+                         '    data_t data1 = {(uint8_t *) params[1], '
+                         '((mbedtls_test_argument_t *) params[2])->len};\n')
+        self.assertEqual(arg_dispatch, ['(char *) params[0]',
+                                        '&data1',
+                                        '((mbedtls_test_argument_t *) params[3])->sint'])
+
+    def test_unsupported_arg(self):
+        """
+        Test that an unsupported argument type raises ValueError.
+        :return:
+        """
+        line = 'void entropy_threshold( char * a, data_t * h, unknown_t result )'
+        self.assertRaises(ValueError, parse_function_arguments, line)
+
+    def test_empty_params(self):
+        """
+        Test no parameters (nothing between parentheses).
+        :return:
+        """
+        line = 'void entropy_threshold()'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_blank_params(self):
+        """
+        Test no parameters (space between parentheses).
+        :return:
+        """
+        line = 'void entropy_threshold( )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_void_params(self):
+        """
+        Test no parameters (void keyword).
+        :return:
+        """
+        line = 'void entropy_threshold(void)'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+    def test_void_space_params(self):
+        """
+        Test no parameters (void with spaces).
+        :return:
+        """
+        line = 'void entropy_threshold( void )'
+        args, local, arg_dispatch = parse_function_arguments(line)
+        self.assertEqual(args, [])
+        self.assertEqual(local, '')
+        self.assertEqual(arg_dispatch, [])
+
+
+class ParseFunctionCode(TestCase):
+    """
+    Test suite for testing parse_function_code()
+    """
+
+    def test_no_function(self):
+        """
+        Test that input with no test function raises GeneratorInputError.
+        :return:
+        """
+        data = '''
+No
+test
+function
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        err_msg = 'file: test_suite_ut.function - Test functions not found!'
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    def test_no_end_case_comment(self):
+        """
+        Test that a missing END_CASE comment raises GeneratorInputError.
+        :return:
+        """
+        data = '''
+void test_func()
+{
+}
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        err_msg = r'file: test_suite_ut.function - '\
+                  'end case pattern .*? not found!'
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    @patch("generate_test_code.parse_function_arguments")
+    def test_function_called(self,
+                             parse_function_arguments_mock):
+        """
+        Test that parse_function_arguments() is called with the signature line.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        data = '''
+void test_func()
+{
+}
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_function_code,
+                          stream, [], [])
+        self.assertTrue(parse_function_arguments_mock.called)
+        parse_function_arguments_mock.assert_called_with('void test_func()\n')
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_return(self, parse_function_arguments_mock,
+                    gen_function_wrapper_mock,
+                    gen_dependencies_mock,
+                    gen_dispatch_mock):
+        """
+        Test generated code.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func()
+{
+    ba ba black sheep
+    have you any wool
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        name, arg, code, dispatch_code = parse_function_code(stream, [], [])
+
+        self.assertTrue(parse_function_arguments_mock.called)
+        parse_function_arguments_mock.assert_called_with('void func()\n')
+        gen_function_wrapper_mock.assert_called_with('test_func', '', [])
+        self.assertEqual(name, 'test_func')
+        self.assertEqual(arg, [])
+        expected = '''#line 1 "test_suite_ut.function"
+
+void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    ;
+}
+'''
+        self.assertEqual(code, expected)
+        self.assertEqual(dispatch_code, "\n    test_func_wrapper,\n")
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_with_exit_label(self, parse_function_arguments_mock,
+                             gen_function_wrapper_mock,
+                             gen_dependencies_mock,
+                             gen_dispatch_mock):
+        """
+        Test when exit label is present.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    def test_non_void_function(self):
+        """
+        Test that a non-void signature is not recognized as a test function.
+        :return:
+        """
+        data = 'int entropy_threshold( char * a, data_t * h, int result )'
+        err_msg = 'file: test_suite_ut.function - Test functions not found!'
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaisesRegex(GeneratorInputError, err_msg,
+                               parse_function_code, stream, [], [])
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_function_name_on_newline(self, parse_function_arguments_mock,
+                                      gen_function_wrapper_mock,
+                                      gen_dependencies_mock,
+                                      gen_dispatch_mock):
+        """
+        Test with line break before the function name.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void
+
+
+func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+void
+
+
+test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_case_starting_with_comment(self, parse_function_arguments_mock,
+                                        gen_function_wrapper_mock,
+                                        gen_dependencies_mock,
+                                        gen_dispatch_mock):
+        """
+        Test with comments before the function signature
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''/* comment */
+/* more
+ * comment */
+// this is\\
+still \\
+a comment
+void func()
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+
+
+
+
+
+void test_func(void)
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_comment_in_prototype(self, parse_function_arguments_mock,
+                                  gen_function_wrapper_mock,
+                                  gen_dependencies_mock,
+                                  gen_dispatch_mock):
+        """
+        Test with comments in the function prototype
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func( int x, // (line \\
+           comment)
+           int y /* lone closing parenthesis) */ )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+void test_func( int x,
+
+           int y )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_line_comment_in_block_comment(self, parse_function_arguments_mock,
+                                           gen_function_wrapper_mock,
+                                           gen_dependencies_mock,
+                                           gen_dispatch_mock):
+        """
+        Test with line comment in block comment.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+void func( int x /* // */ )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+void test_func( int x )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+    @patch("generate_test_code.gen_dispatch")
+    @patch("generate_test_code.gen_dependencies")
+    @patch("generate_test_code.gen_function_wrapper")
+    @patch("generate_test_code.parse_function_arguments")
+    def test_block_comment_in_line_comment(self, parse_function_arguments_mock,
+                                           gen_function_wrapper_mock,
+                                           gen_dependencies_mock,
+                                           gen_dispatch_mock):
+        """
+        Test with block comment in line comment.
+        :return:
+        """
+        parse_function_arguments_mock.return_value = ([], '', [])
+        gen_function_wrapper_mock.return_value = ''
+        gen_dependencies_mock.side_effect = gen_dependencies
+        gen_dispatch_mock.side_effect = gen_dispatch
+        data = '''
+// /*
+void func( int x )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        _, _, code, _ = parse_function_code(stream, [], [])
+
+        expected = '''#line 1 "test_suite_ut.function"
+
+
+void test_func( int x )
+{
+    ba ba black sheep
+    have you any wool
+exit:
+    yes sir yes sir
+    3 bags full
+}
+'''
+        self.assertEqual(code, expected)
+
+
+class ParseFunction(TestCase):
+    """
+    Test Suite for testing parse_functions()
+    """
+
+    @patch("generate_test_code.parse_until_pattern")
+    def test_begin_header(self, parse_until_pattern_mock):
+        """
+        Test that begin header is checked and parse_until_pattern() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        parse_until_pattern_mock.side_effect = stop
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_until_pattern")
+    def test_begin_helper(self, parse_until_pattern_mock):
+        """
+        Test that begin helper is checked and parse_until_pattern() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_until_pattern is called."""
+            raise Exception
+        parse_until_pattern_mock.side_effect = stop
+        data = '''/* BEGIN_SUITE_HELPERS */
+void print_hello_world()
+{
+    printf("Hello World!\n");
+}
+/* END_SUITE_HELPERS */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_until_pattern_mock.assert_called_with(stream,
+                                                    END_SUITE_HELPERS_REGEX)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_suite_dependencies")
+    def test_begin_dep(self, parse_suite_dependencies_mock):
+        """
+        Test that begin dep is checked and parse_suite_dependencies() is
+        called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_suite_dependencies is called."""
+            raise Exception
+        parse_suite_dependencies_mock.side_effect = stop
+        data = '''/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        parse_suite_dependencies_mock.assert_called_with(stream)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_function_dependencies")
+    def test_begin_function_dep(self, func_mock):
+        """
+        Test that a BEGIN_CASE line is checked and
+        parse_function_dependencies() is called.
+        :return:
+        """
+        def stop(*_unused):
+            """Stop when parse_function_dependencies is called."""
+            raise Exception
+        func_mock.side_effect = stop
+
+        dependencies_str = '/* BEGIN_CASE ' \
+                           'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+        data = '''%svoid test_func()
+{
+}
+''' % dependencies_str
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(Exception, parse_functions, stream)
+        func_mock.assert_called_with(dependencies_str)
+        self.assertEqual(stream.line_no, 1)
+
+    @patch("generate_test_code.parse_function_code")
+    @patch("generate_test_code.parse_function_dependencies")
+    def test_return(self, func_mock1, func_mock2):
+        """
+        Test that begin case is checked and parse_function_code() is called.
+        :return:
+        """
+        func_mock1.return_value = []
+        in_func_code = '''void test_func()
+{
+}
+'''
+        func_dispatch = '''
+    test_func_wrapper,
+'''
+        func_mock2.return_value = 'test_func', [],\
+            in_func_code, func_dispatch
+        dependencies_str = '/* BEGIN_CASE ' \
+                           'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
+        data = '''%svoid test_func()
+{
+}
+''' % dependencies_str
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        suite_dependencies, dispatch_code, func_code, func_info = \
+            parse_functions(stream)
+        func_mock1.assert_called_with(dependencies_str)
+        func_mock2.assert_called_with(stream, [], [])
+        self.assertEqual(stream.line_no, 5)
+        self.assertEqual(suite_dependencies, [])
+        expected_dispatch_code = '''/* Function Id: 0 */
+
+    test_func_wrapper,
+'''
+        self.assertEqual(dispatch_code, expected_dispatch_code)
+        self.assertEqual(func_code, in_func_code)
+        self.assertEqual(func_info, {'test_func': (0, [])})
+
+    def test_parsing(self):
+        """
+        Test case parsing.
+        :return:
+        """
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func1()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func2()
+{
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        suite_dependencies, dispatch_code, func_code, func_info = \
+            parse_functions(stream)
+        self.assertEqual(stream.line_no, 23)
+        self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])
+
+        expected_dispatch_code = '''/* Function Id: 0 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+    test_func1_wrapper,
+#else
+    NULL,
+#endif
+/* Function Id: 1 */
+
+#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
+    test_func2_wrapper,
+#else
+    NULL,
+#endif
+'''
+        self.assertEqual(dispatch_code, expected_dispatch_code)
+        expected_func_code = '''#if defined(MBEDTLS_ECP_C)
+#line 2 "test_suite_ut.function"
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 13 "test_suite_ut.function"
+void test_func1(void)
+{
+exit:
+    ;
+}
+
+void test_func1_wrapper( void ** params )
+{
+    (void)params;
+
+    test_func1( );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#if defined(MBEDTLS_ENTROPY_NV_SEED)
+#if defined(MBEDTLS_FS_IO)
+#line 19 "test_suite_ut.function"
+void test_func2(void)
+{
+exit:
+    ;
+}
+
+void test_func2_wrapper( void ** params )
+{
+    (void)params;
+
+    test_func2( );
+}
+#endif /* MBEDTLS_FS_IO */
+#endif /* MBEDTLS_ENTROPY_NV_SEED */
+#endif /* MBEDTLS_ECP_C */
+'''
+        self.assertEqual(func_code, expected_func_code)
+        self.assertEqual(func_info, {'test_func1': (0, []),
+                                     'test_func2': (1, [])})
+
+    def test_same_function_name(self):
+        """
+        Test that a duplicate test function name raises GeneratorInputError.
+        :return:
+        """
+        data = '''/* BEGIN_HEADER */
+#include "mbedtls/ecp.h"
+
+#define ECP_PF_UNKNOWN -1
+/* END_HEADER */
+
+/* BEGIN_DEPENDENCIES
+ * depends_on:MBEDTLS_ECP_C
+ * END_DEPENDENCIES
+ */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+
+/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
+void func()
+{
+}
+/* END_CASE */
+'''
+        stream = StringIOWrapper('test_suite_ut.function', data)
+        self.assertRaises(GeneratorInputError, parse_functions, stream)
+
+
+class EscapedSplit(TestCase):
+    """
+    Test suite for testing escaped_split().
+    Note: since escaped_split() output is written back to the
+    intermediate data file, any escape characters in the input
+    are retained in the output.
+    """
+
+    def test_invalid_input(self):
+        """
+        Test that a split string that is not a single character raises ValueError.
+        :return:
+        """
+        self.assertRaises(ValueError, escaped_split, '', 'string')
+
+    def test_empty_string(self):
+        """
+        Test empty string input.
+        :return:
+        """
+        splits = escaped_split('', ':')
+        self.assertEqual(splits, [])
+
+    def test_no_escape(self):
+        """
+        Test with no escape character. The behaviour should be same as
+        str.split()
+        :return:
+        """
+        test_str = 'yahoo:google'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, test_str.split(':'))
+
+    def test_escaped_input(self):
+        """
+        Test input that has an escaped delimiter.
+        :return:
+        """
+        test_str = r'yahoo\:google:facebook'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\:google', 'facebook'])
+
+    def test_escaped_escape(self):
+        """
+        Test input with an escaped escape character before a delimiter.
+        :return:
+        """
+        test_str = r'yahoo\\:google:facebook'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])
+
+    def test_all_at_once(self):
+        """
+        Test a combination of escaped delimiters and escaped escapes.
+        :return:
+        """
+        test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia'
+        splits = escaped_split(test_str, ':')
+        self.assertEqual(splits, [r'yahoo\\', r'google',
+                                  r'facebook\:instagram\\',
+                                  r'bbc\\', r'wikipedia'])
+
+
+class ParseTestData(TestCase):
+ """
+ Test suite for parse test data.
+ """
+
+ def test_parser(self):
+ """
+ Test that tests are parsed correctly from data file.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+Diffie-Hellman full exchange #3
+dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271"
+
+Diffie-Hellman selftest
+dhm_selftest:
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ # List of (name, function_name, dependencies, args)
+ tests = list(parse_test_data(stream))
+ test1, test2, test3, test4 = tests
+ self.assertEqual(test1[0], 3)
+ self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
+ self.assertEqual(test1[2], 'dhm_do_dhm')
+ self.assertEqual(test1[3], [])
+ self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
+
+ self.assertEqual(test2[0], 6)
+ self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
+ self.assertEqual(test2[2], 'dhm_do_dhm')
+ self.assertEqual(test2[3], [])
+ self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
+ '10', '"9345098304850938450983409622"'])
+
+ self.assertEqual(test3[0], 9)
+ self.assertEqual(test3[1], 'Diffie-Hellman full exchange #3')
+ self.assertEqual(test3[2], 'dhm_do_dhm')
+ self.assertEqual(test3[3], [])
+ self.assertEqual(test3[4], ['10',
+ '"9345098382739712938719287391879381271"',
+ '10',
+ '"9345098792137312973297123912791271"'])
+
+ self.assertEqual(test4[0], 12)
+ self.assertEqual(test4[1], 'Diffie-Hellman selftest')
+ self.assertEqual(test4[2], 'dhm_selftest')
+ self.assertEqual(test4[3], [])
+ self.assertEqual(test4[4], [])
+
+ def test_with_dependencies(self):
+ """
+ Test that tests with dependencies are parsed.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+dhm_do_dhm:10:"23":10:"5"
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ # List of (name, function_name, dependencies, args)
+ tests = list(parse_test_data(stream))
+ test1, test2 = tests
+ self.assertEqual(test1[0], 4)
+ self.assertEqual(test1[1], 'Diffie-Hellman full exchange #1')
+ self.assertEqual(test1[2], 'dhm_do_dhm')
+ self.assertEqual(test1[3], ['YAHOO'])
+ self.assertEqual(test1[4], ['10', '"23"', '10', '"5"'])
+
+ self.assertEqual(test2[0], 7)
+ self.assertEqual(test2[1], 'Diffie-Hellman full exchange #2')
+ self.assertEqual(test2[2], 'dhm_do_dhm')
+ self.assertEqual(test2[3], [])
+ self.assertEqual(test2[4], ['10', '"93450983094850938450983409623"',
+ '10', '"9345098304850938450983409622"'])
+
+ def test_no_args(self):
+ """
+ Test GeneratorInputError is raised when test function name and
+ args line is missing.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+
+
+Diffie-Hellman full exchange #2
+dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"
+
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err = None
+ try:
+ for _, _, _, _, _ in parse_test_data(stream):
+ pass
+ except GeneratorInputError as err:
+ self.assertEqual(type(err), GeneratorInputError)
+
+ def test_incomplete_data(self):
+ """
+ Test GeneratorInputError is raised when test function name
+ and args line is missing.
+ :return:
+ """
+ data = """
+Diffie-Hellman full exchange #1
+depends_on:YAHOO
+"""
+ stream = StringIOWrapper('test_suite_ut.function', data)
+ err = None
+ try:
+ for _, _, _, _, _ in parse_test_data(stream):
+ pass
+ except GeneratorInputError as err:
+ self.assertEqual(type(err), GeneratorInputError)
+
+
+class GenDepCheck(TestCase):
+ """
+ Test suite for gen_dep_check(). It is assumed this function is
+ called with valid inputs.
+ """
+
+ def test_gen_dep_check(self):
+ """
+ Test that dependency check code generated correctly.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+#if defined(YAHOO)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;"""
+ out = gen_dep_check(5, 'YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_not_defined_dependency(self):
+ """
+ Test dependency with !.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+#if !defined(YAHOO)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;"""
+ out = gen_dep_check(5, '!YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_empty_dependency(self):
+ """
+ Test invalid dependency input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!')
+
+ def test_negative_dep_id(self):
+ """
+ Test invalid dependency input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO')
+
+
+class GenExpCheck(TestCase):
+ """
+ Test suite for gen_expression_check(). It is assumed this function
+ is called with valid inputs.
+ """
+
+ def test_gen_exp_check(self):
+ """
+ Test that expression check code generated correctly.
+ :return:
+ """
+ expected = """
+ case 5:
+ {
+ *out_value = YAHOO;
+ }
+ break;"""
+ out = gen_expression_check(5, 'YAHOO')
+ self.assertEqual(out, expected)
+
+ def test_invalid_expression(self):
+ """
+ Test invalid expression input.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')
+
+ def test_negative_exp_id(self):
+ """
+ Test invalid expression id.
+ :return:
+ """
+ self.assertRaises(GeneratorInputError, gen_expression_check,
+ -1, 'YAHOO')
+
+
+class WriteDependencies(TestCase):
+ """
+ Test suite for testing write_dependencies.
+ """
+
+ def test_no_test_dependencies(self):
+ """
+ Test when test dependencies input is empty.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = write_dependencies(stream, [], unique_dependencies)
+ self.assertEqual(dep_check_code, '')
+ self.assertEqual(len(unique_dependencies), 0)
+ self.assertEqual(stream.getvalue(), '')
+
+ def test_unique_dep_ids(self):
+ """
+
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
+ unique_dependencies)
+ expect_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP3)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 2:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expect_dep_check_code)
+ self.assertEqual(len(unique_dependencies), 3)
+ self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')
+
+ def test_dep_id_repeat(self):
+ """
+
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_dependencies = []
+ dep_check_code = ''
+ dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
+ unique_dependencies)
+ dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
+ unique_dependencies)
+ dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
+ unique_dependencies)
+ expect_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP3)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 2:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expect_dep_check_code)
+ self.assertEqual(len(unique_dependencies), 3)
+ self.assertEqual(stream.getvalue(),
+ 'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')
+
+
+class WriteParams(TestCase):
+ """
+ Test Suite for testing write_parameters().
+ """
+
+ def test_no_params(self):
+ """
+ Test with empty test_args
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream, [], [], unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(), '\n')
+
+ def test_no_exp_param(self):
+ """
+ Test when there is no macro or expression in the params.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
+ '0'],
+ ['char*', 'hex', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0\n')
+
+ def test_hex_format_int_param(self):
+ """
+ Test int parameter in hex format.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream,
+ ['"Yahoo"', '"abcdef00"', '0xAA'],
+ ['char*', 'hex', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 0)
+ self.assertEqual(expression_code, '')
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')
+
+ def test_with_exp_param(self):
+ """
+ Test when there is macro or expression in the params.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = write_parameters(stream,
+ ['"Yahoo"', '"abcdef00"', '0',
+ 'MACRO1', 'MACRO2', 'MACRO3'],
+ ['char*', 'hex', 'int',
+ 'int', 'int', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 3)
+ self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;
+ case 2:
+ {
+ *out_value = MACRO3;
+ }
+ break;'''
+ self.assertEqual(expression_code, expected_expression_code)
+ self.assertEqual(stream.getvalue(),
+ ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
+ ':exp:2\n')
+
+ def test_with_repeat_calls(self):
+ """
+ Test when write_parameter() is called with same macro or expression.
+ :return:
+ """
+ stream = StringIOWrapper('test_suite_ut.data', '')
+ unique_expressions = []
+ expression_code = ''
+ expression_code += write_parameters(stream,
+ ['"Yahoo"', 'MACRO1', 'MACRO2'],
+ ['char*', 'int', 'int'],
+ unique_expressions)
+ expression_code += write_parameters(stream,
+ ['"abcdef00"', 'MACRO2', 'MACRO3'],
+ ['hex', 'int', 'int'],
+ unique_expressions)
+ expression_code += write_parameters(stream,
+ ['0', 'MACRO3', 'MACRO1'],
+ ['int', 'int', 'int'],
+ unique_expressions)
+ self.assertEqual(len(unique_expressions), 3)
+ self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;
+ case 2:
+ {
+ *out_value = MACRO3;
+ }
+ break;'''
+ self.assertEqual(expression_code, expected_expression_code)
+ expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
+:hex:"abcdef00":exp:1:exp:2
+:int:0:exp:2:exp:0
+'''
+ self.assertEqual(stream.getvalue(), expected_data_file)
+
+
+class GenTestSuiteDependenciesChecks(TestCase):
+ """
+ Test suite for testing gen_suite_dep_checks()
+ """
+ def test_empty_suite_dependencies(self):
+ """
+ Test with empty suite_dependencies list.
+
+ :return:
+ """
+ dep_check_code, expression_code = \
+ gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
+ self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
+ self.assertEqual(expression_code, 'EXPRESSION_CODE')
+
+ def test_suite_dependencies(self):
+ """
+ Test with suite_dependencies list.
+
+ :return:
+ """
+ dep_check_code, expression_code = \
+ gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
+ 'EXPRESSION_CODE')
+ expected_dep_check_code = '''
+#if defined(SUITE_DEP)
+DEP_CHECK_CODE
+#endif
+'''
+ expected_expression_code = '''
+#if defined(SUITE_DEP)
+EXPRESSION_CODE
+#endif
+'''
+ self.assertEqual(dep_check_code, expected_dep_check_code)
+ self.assertEqual(expression_code, expected_expression_code)
+
+ def test_no_dep_no_exp(self):
+ """
+ Test when there are no dependency and expression code.
+ :return:
+ """
+ dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
+ self.assertEqual(dep_check_code, '')
+ self.assertEqual(expression_code, '')
+
+
+class GenFromTestData(TestCase):
+ """
+ Test suite for gen_from_test_data()
+ """
+
+ @staticmethod
+ @patch("generate_test_code.write_dependencies")
+ @patch("generate_test_code.write_parameters")
+ @patch("generate_test_code.gen_suite_dep_checks")
+ def test_intermediate_data_file(func_mock1,
+ write_parameters_mock,
+ write_dependencies_mock):
+ """
+ Test that intermediate data file is written with expected data.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func1': (1, ('int',))}
+ suite_dependencies = []
+ write_parameters_mock.side_effect = write_parameters
+ write_dependencies_mock.side_effect = write_dependencies
+ func_mock1.side_effect = gen_suite_dep_checks
+ gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
+ write_dependencies_mock.assert_called_with(out_data_f,
+ ['DEP1'], ['DEP1'])
+ write_parameters_mock.assert_called_with(out_data_f, ['0'],
+ ('int',), [])
+ expected_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ func_mock1.assert_called_with(
+ suite_dependencies, expected_dep_check_code, '')
+
+ def test_function_not_found(self):
+ """
+ Test that GeneratorInputError is raised when function info is not found.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func2': (1, ('int',))}
+ suite_dependencies = []
+ self.assertRaises(GeneratorInputError, gen_from_test_data,
+ data_f, out_data_f, func_info, suite_dependencies)
+
+ def test_different_func_args(self):
+ """
+ Test that GeneratorInputError is raised when the number of parameters and
+ function args differ.
+ :return:
+ """
+ data = '''
+My test
+depends_on:DEP1
+func1:0
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func2': (1, ('int', 'hex'))}
+ suite_dependencies = []
+ self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
+ out_data_f, func_info, suite_dependencies)
+
+ def test_output(self):
+ """
+ Test that intermediate data file is written with expected data.
+ :return:
+ """
+ data = '''
+My test 1
+depends_on:DEP1
+func1:0:0xfa:MACRO1:MACRO2
+
+My test 2
+depends_on:DEP1:DEP2
+func2:"yahoo":88:MACRO1
+'''
+ data_f = StringIOWrapper('test_suite_ut.data', data)
+ out_data_f = StringIOWrapper('test_suite_ut.datax', '')
+ func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
+ 'test_func2': (1, ('char*', 'int', 'int'))}
+ suite_dependencies = []
+ dep_check_code, expression_code = \
+ gen_from_test_data(data_f, out_data_f, func_info,
+ suite_dependencies)
+ expected_dep_check_code = '''
+ case 0:
+ {
+#if defined(DEP1)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;
+ case 1:
+ {
+#if defined(DEP2)
+ ret = DEPENDENCY_SUPPORTED;
+#else
+ ret = DEPENDENCY_NOT_SUPPORTED;
+#endif
+ }
+ break;'''
+ expected_data = '''My test 1
+depends_on:0
+0:int:0:int:0xfa:exp:0:exp:1
+
+My test 2
+depends_on:0:1
+1:char*:"yahoo":int:88:exp:0
+
+'''
+ expected_expression_code = '''
+ case 0:
+ {
+ *out_value = MACRO1;
+ }
+ break;
+ case 1:
+ {
+ *out_value = MACRO2;
+ }
+ break;'''
+ self.assertEqual(dep_check_code, expected_dep_check_code)
+ self.assertEqual(out_data_f.getvalue(), expected_data)
+ self.assertEqual(expression_code, expected_expression_code)
+
+
+if __name__ == '__main__':
+ unittest_main()
diff --git a/tests/scripts/test_psa_compliance.py b/tests/scripts/test_psa_compliance.py
new file mode 100755
index 0000000..8d70cbc
--- /dev/null
+++ b/tests/scripts/test_psa_compliance.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+"""Run the PSA Crypto API compliance test suite.
+Clone the repo and check out the commit specified by PSA_ARCH_TEST_REPO and PSA_ARCH_TEST_REF,
+then compile and run the test suite. The clone is stored at <repository root>/psa-arch-tests.
+Known defects in either the test suite or mbedtls / TF-PSA-Crypto - identified by their test
+number - are ignored, while unexpected failures AND successes are reported as errors, to help
+keep the list of known defects as up to date as possible.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+from typing import List
+
+#pylint: disable=unused-import
+import scripts_path
+from mbedtls_dev import build_tree
+
+# PSA Compliance tests we expect to fail due to known defects in Mbed TLS /
+# TF-PSA-Crypto (or the test suite).
+# The test numbers correspond to the numbers used by the console output of the test suite.
+# Test number 2xx corresponds to the files in the folder
+# psa-arch-tests/api-tests/dev_apis/crypto/test_c0xx
+EXPECTED_FAILURES = {} # type: dict
+
+PSA_ARCH_TESTS_REPO = 'https://github.com/ARM-software/psa-arch-tests.git'
+PSA_ARCH_TESTS_REF = 'v23.06_API1.5_ADAC_EAC'
+
+#pylint: disable=too-many-branches,too-many-statements,too-many-locals
+def main(library_build_dir: str):
+ root_dir = os.getcwd()
+
+ in_tf_psa_crypto_repo = build_tree.looks_like_tf_psa_crypto_root(root_dir)
+
+ crypto_name = build_tree.crypto_library_filename(root_dir)
+ library_subdir = build_tree.crypto_core_directory(root_dir, relative=True)
+
+ crypto_lib_filename = (library_build_dir + '/' +
+ library_subdir + '/' +
+ 'lib' + crypto_name + '.a')
+
+ if not os.path.exists(crypto_lib_filename):
+ #pylint: disable=bad-continuation
+ subprocess.check_call([
+ 'cmake', '.',
+ '-GUnix Makefiles',
+ '-B' + library_build_dir
+ ])
+ subprocess.check_call(['cmake', '--build', library_build_dir,
+ '--target', crypto_name])
+
+ psa_arch_tests_dir = 'psa-arch-tests'
+ os.makedirs(psa_arch_tests_dir, exist_ok=True)
+ try:
+ os.chdir(psa_arch_tests_dir)
+
+ # Reuse existing local clone
+ subprocess.check_call(['git', 'init'])
+ subprocess.check_call(['git', 'fetch', PSA_ARCH_TESTS_REPO, PSA_ARCH_TESTS_REF])
+ subprocess.check_call(['git', 'checkout', 'FETCH_HEAD'])
+
+ build_dir = 'api-tests/build'
+ try:
+ shutil.rmtree(build_dir)
+ except FileNotFoundError:
+ pass
+ os.mkdir(build_dir)
+ os.chdir(build_dir)
+
+ extra_includes = (';{}/drivers/builtin/include'.format(root_dir)
+ if in_tf_psa_crypto_repo else '')
+
+ #pylint: disable=bad-continuation
+ subprocess.check_call([
+ 'cmake', '..',
+ '-GUnix Makefiles',
+ '-DTARGET=tgt_dev_apis_stdc',
+ '-DTOOLCHAIN=HOST_GCC',
+ '-DSUITE=CRYPTO',
+ '-DPSA_CRYPTO_LIB_FILENAME={}/{}'.format(root_dir,
+ crypto_lib_filename),
+ ('-DPSA_INCLUDE_PATHS={}/include' + extra_includes).format(root_dir)
+ ])
+ subprocess.check_call(['cmake', '--build', '.'])
+
+ proc = subprocess.Popen(['./psa-arch-tests-crypto'],
+ bufsize=1, stdout=subprocess.PIPE, universal_newlines=True)
+
+ test_re = re.compile(
+ '^TEST: (?P<test_num>[0-9]*)|'
+ '^TEST RESULT: (?P<test_result>FAILED|PASSED)'
+ )
+ test = -1
+ unexpected_successes = set(EXPECTED_FAILURES)
+ expected_failures = [] # type: List[int]
+ unexpected_failures = [] # type: List[int]
+ if proc.stdout is None:
+ return 1
+
+ for line in proc.stdout:
+ print(line, end='')
+ match = test_re.match(line)
+ if match is not None:
+ groupdict = match.groupdict()
+ test_num = groupdict['test_num']
+ if test_num is not None:
+ test = int(test_num)
+ elif groupdict['test_result'] == 'FAILED':
+ try:
+ unexpected_successes.remove(test)
+ expected_failures.append(test)
+ print('Expected failure, ignoring')
+ except KeyError:
+ unexpected_failures.append(test)
+ print('ERROR: Unexpected failure')
+ elif test in unexpected_successes:
+ print('ERROR: Unexpected success')
+ proc.wait()
+
+ print()
+ print('***** test_psa_compliance.py report ******')
+ print()
+ print('Expected failures:', ', '.join(str(i) for i in expected_failures))
+ print('Unexpected failures:', ', '.join(str(i) for i in unexpected_failures))
+ print('Unexpected successes:', ', '.join(str(i) for i in sorted(unexpected_successes)))
+ print()
+ if unexpected_successes or unexpected_failures:
+ if unexpected_successes:
+ print('Unexpected successes encountered.')
+ print('Please remove the corresponding tests from '
+ 'EXPECTED_FAILURES in tests/scripts/compliance_test.py')
+ print()
+ print('FAILED')
+ return 1
+ else:
+ print('SUCCESS')
+ return 0
+ finally:
+ os.chdir(root_dir)
+
+if __name__ == '__main__':
+ BUILD_DIR = 'out_of_source_build'
+
+ # pylint: disable=invalid-name
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--build-dir', nargs=1,
+ help='path to Mbed TLS / TF-PSA-Crypto build directory')
+ args = parser.parse_args()
+
+ if args.build_dir is not None:
+ BUILD_DIR = args.build_dir[0]
+
+ sys.exit(main(BUILD_DIR))
diff --git a/tests/scripts/test_psa_constant_names.py b/tests/scripts/test_psa_constant_names.py
new file mode 100755
index 0000000..6883e27
--- /dev/null
+++ b/tests/scripts/test_psa_constant_names.py
@@ -0,0 +1,191 @@
+#!/usr/bin/env python3
+"""Test the program psa_constant_names.
+Gather constant names from header files and test cases. Compile a C program
+to print out their numerical values, feed these numerical values to
+psa_constant_names, and check that the output is the original name.
+Return 0 if all test cases pass, 1 if the output was not always as expected,
+or 1 (with a Python backtrace) if there was an operational error.
+"""
+
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+import argparse
+from collections import namedtuple
+import os
+import re
+import subprocess
+import sys
+from typing import Iterable, List, Optional, Tuple
+
+import scripts_path # pylint: disable=unused-import
+from mbedtls_dev import c_build_helper
+from mbedtls_dev.macro_collector import InputsForTest, PSAMacroEnumerator
+from mbedtls_dev import typing_util
+
+def gather_inputs(headers: Iterable[str],
+ test_suites: Iterable[str],
+ inputs_class=InputsForTest) -> PSAMacroEnumerator:
+ """Read the list of inputs to test psa_constant_names with."""
+ inputs = inputs_class()
+ for header in headers:
+ inputs.parse_header(header)
+ for test_cases in test_suites:
+ inputs.parse_test_cases(test_cases)
+ inputs.add_numerical_values()
+ inputs.gather_arguments()
+ return inputs
+
+def run_c(type_word: str,
+ expressions: Iterable[str],
+ include_path: Optional[str] = None,
+ keep_c: bool = False) -> List[str]:
+ """Generate and run a program to print out numerical values of C expressions."""
+ if type_word == 'status':
+ cast_to = 'long'
+ printf_format = '%ld'
+ else:
+ cast_to = 'unsigned long'
+ printf_format = '0x%08lx'
+ return c_build_helper.get_c_expression_values(
+ cast_to, printf_format,
+ expressions,
+ caller='test_psa_constant_names.py for {} values'.format(type_word),
+ file_label=type_word,
+ header='#include <psa/crypto.h>',
+ include_path=include_path,
+ keep_c=keep_c
+ )
+
+NORMALIZE_STRIP_RE = re.compile(r'\s+')
+def normalize(expr: str) -> str:
+ """Normalize the C expression so as not to care about trivial differences.
+
+ Currently "trivial differences" means whitespace.
+ """
+ return re.sub(NORMALIZE_STRIP_RE, '', expr)
+
+ALG_TRUNCATED_TO_SELF_RE = \
+ re.compile(r'PSA_ALG_AEAD_WITH_SHORTENED_TAG\('
+ r'PSA_ALG_(?:CCM|CHACHA20_POLY1305|GCM)'
+ r', *16\)\Z')
+
+def is_simplifiable(expr: str) -> bool:
+ """Determine whether an expression is simplifiable.
+
+ Simplifiable expressions can't be output in their input form, since
+ the output will be the simple form. Therefore they must be excluded
+ from testing.
+ """
+ if ALG_TRUNCATED_TO_SELF_RE.match(expr):
+ return True
+ return False
+
+def collect_values(inputs: InputsForTest,
+ type_word: str,
+ include_path: Optional[str] = None,
+ keep_c: bool = False) -> Tuple[List[str], List[str]]:
+ """Generate expressions using known macro names and calculate their values.
+
+ Return a list of pairs of (expr, value) where expr is an expression and
+ value is a string representation of its integer value.
+ """
+ names = inputs.get_names(type_word)
+ expressions = sorted(expr
+ for expr in inputs.generate_expressions(names)
+ if not is_simplifiable(expr))
+ values = run_c(type_word, expressions,
+ include_path=include_path, keep_c=keep_c)
+ return expressions, values
+
+class Tests:
+ """An object representing tests and their results."""
+
+ Error = namedtuple('Error',
+ ['type', 'expression', 'value', 'output'])
+
+ def __init__(self, options) -> None:
+ self.options = options
+ self.count = 0
+ self.errors = [] #type: List[Tests.Error]
+
+ def run_one(self, inputs: InputsForTest, type_word: str) -> None:
+ """Test psa_constant_names for the specified type.
+
+ Run the program on the names for this type.
+ Use the inputs to figure out what arguments to pass to macros that
+ take arguments.
+ """
+ expressions, values = collect_values(inputs, type_word,
+ include_path=self.options.include,
+ keep_c=self.options.keep_c)
+ output_bytes = subprocess.check_output([self.options.program,
+ type_word] + values)
+ output = output_bytes.decode('ascii')
+ outputs = output.strip().split('\n')
+ self.count += len(expressions)
+ for expr, value, output in zip(expressions, values, outputs):
+ if self.options.show:
+ sys.stdout.write('{} {}\t{}\n'.format(type_word, value, output))
+ if normalize(expr) != normalize(output):
+ self.errors.append(self.Error(type=type_word,
+ expression=expr,
+ value=value,
+ output=output))
+
+ def run_all(self, inputs: InputsForTest) -> None:
+ """Run psa_constant_names on all the gathered inputs."""
+ for type_word in ['status', 'algorithm', 'ecc_curve', 'dh_group',
+ 'key_type', 'key_usage']:
+ self.run_one(inputs, type_word)
+
+ def report(self, out: typing_util.Writable) -> None:
+ """Describe each case where the output is not as expected.
+
+ Write the errors to ``out``.
+ Also write a total.
+ """
+ for error in self.errors:
+ out.write('For {} "{}", got "{}" (value: {})\n'
+ .format(error.type, error.expression,
+ error.output, error.value))
+ out.write('{} test cases'.format(self.count))
+ if self.errors:
+ out.write(', {} FAIL\n'.format(len(self.errors)))
+ else:
+ out.write(' PASS\n')
+
+HEADERS = ['psa/crypto.h', 'psa/crypto_extra.h', 'psa/crypto_values.h']
+TEST_SUITES = ['tests/suites/test_suite_psa_crypto_metadata.data']
+
+def main():
+ parser = argparse.ArgumentParser(description=globals()['__doc__'])
+ parser.add_argument('--include', '-I',
+ action='append', default=['include'],
+ help='Directory for header files')
+ parser.add_argument('--keep-c',
+ action='store_true', dest='keep_c', default=False,
+ help='Keep the intermediate C file')
+ parser.add_argument('--no-keep-c',
+ action='store_false', dest='keep_c',
+ help='Don\'t keep the intermediate C file (default)')
+ parser.add_argument('--program',
+ default='programs/psa/psa_constant_names',
+ help='Program to test')
+ parser.add_argument('--show',
+ action='store_true',
+ help='Show tested values on stdout')
+ parser.add_argument('--no-show',
+ action='store_false', dest='show',
+ help='Don\'t show tested values (default)')
+ options = parser.parse_args()
+ headers = [os.path.join(options.include[0], h) for h in HEADERS]
+ inputs = gather_inputs(headers, TEST_SUITES)
+ tests = Tests(options)
+ tests.run_all(inputs)
+ tests.report(sys.stdout)
+ if tests.errors:
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/test_zeroize.gdb b/tests/scripts/test_zeroize.gdb
new file mode 100644
index 0000000..57f771f
--- /dev/null
+++ b/tests/scripts/test_zeroize.gdb
@@ -0,0 +1,64 @@
+# test_zeroize.gdb
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# Run a test using the debugger to check that the mbedtls_platform_zeroize()
+# function in platform_util.h is not being optimized out by the compiler. To do
+# so, the script loads the test program at programs/test/zeroize.c and sets a
+# breakpoint at the last return statement in main(). When the breakpoint is
+# hit, the debugger manually checks the contents to be zeroized and checks that
+# it is actually cleared.
+#
+# The mbedtls_platform_zeroize() test is debugger driven because there does not
+# seem to be a mechanism to reliably check whether the zeroize calls are being
+# eliminated by compiler optimizations from within the compiled program. The
+# problem is that a compiler would typically remove what it considers to be
+# "unnecessary" assignments as part of redundant code elimination. To identify
+# such code, the compiler will create some form of dependency graph between
+# reads and writes to variables (among other situations). It will then use this
+# data structure to remove redundant code that does not have an impact on the
+# program's observable behavior. In the case of mbedtls_platform_zeroize(), an
+# intelligent compiler could determine that this function clears a block of
+# memory that is not accessed later in the program, so removing the call to
+# mbedtls_platform_zeroize() does not have an observable behavior. However,
+# inserting a test after a call to mbedtls_platform_zeroize() to check whether
+# the block of memory was correctly zeroed would force the compiler to not
+# eliminate the mbedtls_platform_zeroize() call. If this does not occur, then
+# the compiler potentially has a bug.
+#
+# Note: This test requires that the test program is compiled with -g3.
+
+set confirm off
+
+file ./programs/test/zeroize
+
+search GDB_BREAK_HERE
+break $_
+
+set args ./programs/test/zeroize.c
+run
+
+set $i = 0
+set $len = sizeof(buf)
+set $buf = buf
+
+while $i < $len
+ if $buf[$i++] != 0
+ echo The buffer at was not zeroized\n
+ quit 1
+ end
+end
+
+echo The buffer was correctly zeroized\n
+
+continue
+
+if $_exitcode != 0
+ echo The program did not terminate correctly\n
+ quit 1
+end
+
+quit 0
diff --git a/tests/scripts/translate_ciphers.py b/tests/scripts/translate_ciphers.py
new file mode 100755
index 0000000..90514fc
--- /dev/null
+++ b/tests/scripts/translate_ciphers.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python3
+
+# translate_ciphers.py
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+
+"""
+Translate standard ciphersuite names to GnuTLS, OpenSSL and Mbed TLS standards.
+
+To test the translation functions run:
+python3 -m unittest translate_ciphers.py
+"""
+
+import re
+import argparse
+import unittest
+
+class TestTranslateCiphers(unittest.TestCase):
+    """
+    Ensure translate_ciphers.py translates and formats ciphersuite names
+    correctly
+    """
+    def test_translate_all_cipher_names(self):
+        """
+        Translate standard ciphersuite names to GnuTLS, OpenSSL and
+        Mbed TLS counterpart. Use only a small subset of ciphers
+        that exercise each step of the translation functions
+        """
+        # Each tuple is:
+        #   (standard/IANA name,
+        #    expected GnuTLS priority-string form,
+        #    expected OpenSSL name,
+        #    expected Mbed TLS name)
+        # A None entry means the suite has no counterpart under that
+        # implementation's naming, so that translation is skipped below.
+        ciphers = [
+            ("TLS_ECDHE_ECDSA_WITH_NULL_SHA",
+             "+ECDHE-ECDSA:+NULL:+SHA1",
+             "ECDHE-ECDSA-NULL-SHA",
+             "TLS-ECDHE-ECDSA-WITH-NULL-SHA"),
+            ("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+             "+ECDHE-ECDSA:+AES-128-GCM:+AEAD",
+             "ECDHE-ECDSA-AES128-GCM-SHA256",
+             "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256"),
+            ("TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA",
+             "+DHE-RSA:+3DES-CBC:+SHA1",
+             "EDH-RSA-DES-CBC3-SHA",
+             "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA"),
+            ("TLS_RSA_WITH_AES_256_CBC_SHA",
+             "+RSA:+AES-256-CBC:+SHA1",
+             "AES256-SHA",
+             "TLS-RSA-WITH-AES-256-CBC-SHA"),
+            ("TLS_PSK_WITH_3DES_EDE_CBC_SHA",
+             "+PSK:+3DES-CBC:+SHA1",
+             "PSK-3DES-EDE-CBC-SHA",
+             "TLS-PSK-WITH-3DES-EDE-CBC-SHA"),
+            ("TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
+             None,
+             "ECDHE-ECDSA-CHACHA20-POLY1305",
+             "TLS-ECDHE-ECDSA-WITH-CHACHA20-POLY1305-SHA256"),
+            ("TLS_ECDHE_ECDSA_WITH_AES_128_CCM",
+             "+ECDHE-ECDSA:+AES-128-CCM:+AEAD",
+             None,
+             "TLS-ECDHE-ECDSA-WITH-AES-128-CCM"),
+            ("TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384",
+             None,
+             "ECDHE-ARIA256-GCM-SHA384",
+             "TLS-ECDHE-RSA-WITH-ARIA-256-GCM-SHA384"),
+        ]
+
+        # s = standard name; g_exp/o_exp/m_exp = expected translations.
+        for s, g_exp, o_exp, m_exp in ciphers:
+
+            if g_exp is not None:
+                g = translate_gnutls(s)
+                self.assertEqual(g, g_exp)
+
+            if o_exp is not None:
+                o = translate_ossl(s)
+                self.assertEqual(o, o_exp)
+
+            if m_exp is not None:
+                m = translate_mbedtls(s)
+                self.assertEqual(m, m_exp)
+
+def translate_gnutls(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return the GnuTLS naming convention
+
+    s_cipher: a name in IANA (TLS_..._WITH_...) or Mbed TLS
+    (TLS-...-WITH-...) form.
+    Returns a GnuTLS priority-string fragment such as
+    "+ECDHE-ECDSA:+AES-128-GCM:+AEAD".
+    """
+
+    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
+    # naming convention
+    s_cipher = s_cipher.replace("_", "-")
+
+    # Drop the leading "TLS-" and prefix the key exchange with "+".
+    s_cipher = re.sub(r'\ATLS-', '+', s_cipher)
+    s_cipher = s_cipher.replace("-WITH-", ":+")
+    s_cipher = s_cipher.replace("-EDE", "")
+
+    # SHA in Mbed TLS == SHA1 GnuTLS,
+    # if the last 3 chars are SHA append 1
+    if s_cipher[-3:] == "SHA":
+        s_cipher = s_cipher+"1"
+
+    # CCM or CCM-8 should be followed by ":+AEAD"
+    # Replace "GCM:+SHAxyz" with "GCM:+AEAD"
+    if "CCM" in s_cipher or "GCM" in s_cipher:
+        s_cipher = re.sub(r"GCM-SHA\d\d\d", "GCM", s_cipher)
+        s_cipher = s_cipher+":+AEAD"
+
+    # Replace the last "-" with ":+"
+    # (separates the MAC from the cipher for non-AEAD suites)
+    else:
+        index = s_cipher.rindex("-")
+        s_cipher = s_cipher[:index] + ":+" + s_cipher[index+1:]
+
+    return s_cipher
+
+def translate_ossl(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return the OpenSSL naming convention
+
+    s_cipher: a name in IANA (TLS_..._WITH_...) or Mbed TLS
+    (TLS-...-WITH-...) form.
+    Returns the OpenSSL cipher name, e.g. "ECDHE-ECDSA-AES128-GCM-SHA256".
+    """
+
+    # Replace "_" with "-" to handle ciphersuite names based on Mbed TLS
+    # naming convention
+    s_cipher = s_cipher.replace("_", "-")
+
+    # OpenSSL names carry neither the "TLS-" prefix nor "-WITH".
+    s_cipher = re.sub(r'^TLS-', '', s_cipher)
+    s_cipher = s_cipher.replace("-WITH", "")
+
+    # Remove the "-" from "ABC-xyz"
+    s_cipher = s_cipher.replace("AES-", "AES")
+    s_cipher = s_cipher.replace("CAMELLIA-", "CAMELLIA")
+    s_cipher = s_cipher.replace("ARIA-", "ARIA")
+
+    # Remove "RSA" if it is at the beginning
+    s_cipher = re.sub(r'^RSA-', r'', s_cipher)
+
+    # For all circumstances outside of PSK
+    if "PSK" not in s_cipher:
+        s_cipher = s_cipher.replace("-EDE", "")
+        s_cipher = s_cipher.replace("3DES-CBC", "DES-CBC3")
+
+        # Remove "CBC" if it is not prefixed by DES
+        s_cipher = re.sub(r'(?<!DES-)CBC-', r'', s_cipher)
+
+    # ECDHE-RSA-ARIA does not exist in OpenSSL
+    s_cipher = s_cipher.replace("ECDHE-RSA-ARIA", "ECDHE-ARIA")
+
+    # POLY1305 should not be followed by anything
+    # (OpenSSL omits the trailing "-SHA256")
+    if "POLY1305" in s_cipher:
+        index = s_cipher.rindex("POLY1305")
+        s_cipher = s_cipher[:index+8]
+
+    # If DES is being used, Replace DHE with EDH
+    if "DES" in s_cipher and "DHE" in s_cipher and "ECDHE" not in s_cipher:
+        s_cipher = s_cipher.replace("DHE", "EDH")
+
+    return s_cipher
+
+def translate_mbedtls(s_cipher):
+    """
+    Translate s_cipher from standard ciphersuite naming convention
+    and return Mbed TLS ciphersuite naming convention
+
+    Mbed TLS names differ from the IANA standard only in the separator,
+    so this is a pure character substitution.
+    """
+
+    # Replace "_" with "-"
+    s_cipher = s_cipher.replace("_", "-")
+
+    return s_cipher
+
+def format_ciphersuite_names(mode, names):
+    """
+    Translate every name in names with the translator selected by mode
+    ('g' = GnuTLS, 'o' = OpenSSL, 'm' = Mbed TLS) and return one
+    space-separated string of "standard=translated" pairs.
+
+    Raises KeyError if mode is not one of 'g', 'o', 'm'.
+    """
+    # Map the one-letter mode to the matching translation function.
+    t = {"g": translate_gnutls,
+         "o": translate_ossl,
+         "m": translate_mbedtls
+         }[mode]
+    return " ".join(c + '=' + t(c) for c in names)
+
+def main(target, names):
+    """Print the "standard=translated" pairs for the target convention."""
+    print(format_ciphersuite_names(target, names))
+
+if __name__ == "__main__":
+    # 'target' selects the output convention: o = OpenSSL, g = GnuTLS,
+    # m = Mbed TLS. 'names' is one or more ciphersuite names to translate.
+    PARSER = argparse.ArgumentParser()
+    PARSER.add_argument('target', metavar='TARGET', choices=['o', 'g', 'm'])
+    PARSER.add_argument('names', metavar='NAMES', nargs='+')
+    ARGS = PARSER.parse_args()
+    main(ARGS.target, ARGS.names)
diff --git a/tests/scripts/travis-log-failure.sh b/tests/scripts/travis-log-failure.sh
new file mode 100755
index 0000000..3daecf3
--- /dev/null
+++ b/tests/scripts/travis-log-failure.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+# travis-log-failure.sh
+#
+# Copyright The Mbed TLS Contributors
+# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
+#
+# Purpose
+#
+# List the server and client logs on failed ssl-opt.sh and compat.sh tests.
+# This script is used to make the logs show up in the Travis test results.
+#
+# Some of the logs can be very long: this means usually a couple of megabytes
+# but it can be much more. For example, the client log of test 273 in ssl-opt.sh
+# is more than 630 Megabytes long.
+
+# Sanity check: the script must be run from the Mbed TLS root directory.
+if [ -d include/mbedtls ]; then :; else
+    echo "$0: must be run from root" >&2
+    exit 1
+fi
+
+# Log-file patterns written by compat.sh (o-*, c-*) and ssl-opt.sh.
+FILES="o-srv-*.log o-cli-*.log c-srv-*.log c-cli-*.log o-pxy-*.log"
+# Show at most 1 MiB (tail) of each log, since some logs are huge.
+MAX_LOG_SIZE=1048576
+
+for PATTERN in $FILES; do
+    # NOTE(review): parsing 'ls' output and leaving $LOG unquoted is
+    # fragile in general; it works here only because these log names
+    # contain no whitespace or glob characters.
+    for LOG in $( ls tests/$PATTERN 2>/dev/null ); do
+        echo
+        echo "****** BEGIN file: $LOG ******"
+        echo
+        tail -c $MAX_LOG_SIZE $LOG
+        echo "****** END file: $LOG ******"
+        echo
+        # Delete the log after printing it so it is shown only once.
+        rm $LOG
+    done
+done