Merge branch '2022-09-12-update-pytests-for-more-parellel-support' into next

To quote the author:

This series makes a further attempt to get closer to having all tests
run in parallel. It introduces a new 'make pcheck' option which runs
tests in parallel, skipping those that are not compatible.

A number of fixes are included for existing tests. The vboot test is
updated to only run a single scenario in 'quick' mode.

This makes use of pytest's parallel-testing features. The resulting
times (including incremental building with LTO) on a 16-core machine are
as follows:

   make pcheck        - 1 minute 6 seconds
   make qcheck        - 3 minutes
   make check         - 5 minutes 15 seconds

Note that this is not a fair comparison, since 'make pcheck' omits even more
tests than 'make qcheck' does.
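
For reference, the equivalent direct invocation (taken from the updated
documentation below; the board type and build directory are just examples) is:

   test/py/test.py -B sandbox --build --build-dir /tmp/b/sandbox -q -k \
       'not slow and not bootstd and not spi_flash' -n16
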
diff --git a/Makefile b/Makefile
index 27375f0..0b8079c 100644
--- a/Makefile
+++ b/Makefile
@@ -521,8 +521,8 @@
 
 no-dot-config-targets := clean clobber mrproper distclean \
 			 help %docs check% coccicheck \
-			 ubootversion backup tests check qcheck tcheck pylint \
-			 pylint_err
+			 ubootversion backup tests check pcheck qcheck tcheck \
+			 pylint pylint_err
 
 config-targets := 0
 mixed-targets  := 0
@@ -2364,6 +2364,7 @@
 	@echo  'Test targets:'
 	@echo  ''
 	@echo  '  check           - Run all automated tests that use sandbox'
+	@echo  '  pcheck          - Run quick automated tests in parallel'
 	@echo  '  qcheck          - Run quick automated tests that use sandbox'
 	@echo  '  tcheck          - Run quick automated tests on tools'
 	@echo  '  pylint          - Run pylint on all Python files'
@@ -2409,6 +2410,9 @@
 tests check:
 	$(srctree)/test/run
 
+pcheck:
+	$(srctree)/test/run parallel
+
 qcheck:
 	$(srctree)/test/run quick
 
diff --git a/doc/develop/py_testing.rst b/doc/develop/py_testing.rst
index 06f9196..92fbd22 100644
--- a/doc/develop/py_testing.rst
+++ b/doc/develop/py_testing.rst
@@ -121,31 +121,36 @@
 Running tests in parallel
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Note: This does not fully work yet and is documented only so you can try to
-fix the problems.
+Note: Not all tests can run in parallel at present, so the usual approach is
+to just run those that can.
 
 First install support for parallel tests::
 
+    sudo apt install python3-pytest-xdist
+
+or::
+
     pip3 install pytest-xdist
 
-Then build sandbox in a suitable build directory. It is not possible to use
-the --build flag with xdist.
+Then run the tests in parallel using the -n flag::
 
-Finally, run the tests in parallel using the -n flag::
+    test/py/test.py -B sandbox --build --build-dir /tmp/b/sandbox -q -k \
+        'not slow and not bootstd and not spi_flash' -n16
 
-    # build sandbox first, in a suitable build directory. It is not possible
-    # to use the --build flag with -n
-    test/py/test.py -B sandbox --build-dir /tmp/b/sandbox -q -k 'not slow' -n32
+You can also use ``make pcheck`` to run all tests in parallel. This uses a
+maximum of 16 threads, since the setup time is significant and there are under
+1000 tests.
 
-At least the following non-slow tests are known to fail:
+Note that the ``test-log.html`` output does not work correctly at present with
+parallel testing. All the threads write to it at once, so it is garbled.
 
-- test_fit_ecdsa
-- test_bind_unbind_with_uclass
-- ut_dm_spi_flash
-- test_gpt_rename_partition
-- test_gpt_swap_partitions
-- test_pinmux_status
-- test_sqfs_load
+Note that the ``tools/`` tests still run each tool's tests one after the
+other, although within that, they do run in parallel. So for example, the
+buildman tests run in parallel, then the binman tests run in parallel. There
+would be a significant advantage to running them all in parallel together, but
+that would require a large amount of refactoring, e.g. with more use of pytest
+fixtures. The code-coverage tests are omitted since they cannot run in
+parallel due to a Python limitation.
 
 
 Testing under a debugger
diff --git a/doc/develop/testing.rst b/doc/develop/testing.rst
index 1abe4d7..5afeb42 100644
--- a/doc/develop/testing.rst
+++ b/doc/develop/testing.rst
@@ -28,8 +28,12 @@
 
     make tcheck
 
+You can also run a selection of tests in parallel with::
+
+    make pcheck
+
 All of the above use the test/run script with a parameter to select which tests
-are run.
+are run. See :doc:`py_testing` for more information.
 
 
 Sandbox
diff --git a/test/bootm.c b/test/bootm.c
index 7d03e1e..4bb3ca0 100644
--- a/test/bootm.c
+++ b/test/bootm.c
@@ -208,7 +208,8 @@
 /* Test substitution processing in the bootargs variable */
 static int bootm_test_subst_var(struct unit_test_state *uts)
 {
-	env_set("bootargs", NULL);
+	ut_assertok(env_set("silent_linux", "yes"));
+	ut_assertok(env_set("bootargs", NULL));
 	ut_assertok(bootm_process_cmdline_env(BOOTM_CL_SILENT));
 	ut_asserteq_str("console=ttynull", env_get("bootargs"));
 
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 2ba3447..304e931 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -15,9 +15,11 @@
 import atexit
 import configparser
 import errno
+import filelock
 import io
 import os
 import os.path
+from pathlib import Path
 import pytest
 import re
 from _pytest.runner import runtestprotocol
@@ -27,6 +29,8 @@
 log = None
 console = None
 
+TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))
+
 def mkdir_p(path):
     """Create a directory path.
 
@@ -76,6 +80,53 @@
         help='Run sandbox under gdbserver. The argument is the channel '+
         'over which gdbserver should communicate, e.g. localhost:1234')
 
+def run_build(config, source_dir, build_dir, board_type, log):
+    """run_build: Build U-Boot
+
+    Args:
+        config: The pytest configuration.
+        source_dir (str): Directory containing source code
+        build_dir (str): Directory to build in
+        board_type (str): board_type parameter (e.g. 'sandbox')
+        log (Logfile): Log file to use
+    """
+    if config.getoption('buildman'):
+        if build_dir != source_dir:
+            dest_args = ['-o', build_dir, '-w']
+        else:
+            dest_args = ['-i']
+        cmds = (['buildman', '--board', board_type] + dest_args,)
+        name = 'buildman'
+    else:
+        if build_dir != source_dir:
+            o_opt = 'O=%s' % build_dir
+        else:
+            o_opt = ''
+        cmds = (
+            ['make', o_opt, '-s', board_type + '_defconfig'],
+            ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
+        )
+        name = 'make'
+
+    with log.section(name):
+        runner = log.get_runner(name, sys.stdout)
+        for cmd in cmds:
+            runner.run(cmd, cwd=source_dir)
+        runner.close()
+        log.status_pass('OK')
+
+def pytest_xdist_setupnodes(config, specs):
+    """Clear out any 'done' file from a previous build"""
+    global build_done_file
+    build_dir = config.getoption('build_dir')
+    board_type = config.getoption('board_type')
+    source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))
+    if not build_dir:
+        build_dir = source_dir + '/build-' + board_type
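+    # Remove any stale 'done' marker so that the first process to take the
+    # build lock performs a fresh build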
+    build_done_file = Path(build_dir) / 'build.done'
+    if build_done_file.exists():
+        os.remove(build_done_file)
+
 def pytest_configure(config):
     """pytest hook: Perform custom initialization at startup time.
 
@@ -110,8 +161,7 @@
     global console
     global ubconfig
 
-    test_py_dir = os.path.dirname(os.path.abspath(__file__))
-    source_dir = os.path.dirname(os.path.dirname(test_py_dir))
+    source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))
 
     board_type = config.getoption('board_type')
     board_type_filename = board_type.replace('-', '_')
@@ -142,30 +192,13 @@
     log = multiplexed_log.Logfile(result_dir + '/test-log.html')
 
     if config.getoption('build'):
-        if config.getoption('buildman'):
-            if build_dir != source_dir:
-                dest_args = ['-o', build_dir, '-w']
-            else:
-                dest_args = ['-i']
-            cmds = (['buildman', '--board', board_type] + dest_args,)
-            name = 'buildman'
-        else:
-            if build_dir != source_dir:
-                o_opt = 'O=%s' % build_dir
-            else:
-                o_opt = ''
-            cmds = (
-                ['make', o_opt, '-s', board_type + '_defconfig'],
-                ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
-            )
-            name = 'make'
-
-        with log.section(name):
-            runner = log.get_runner(name, sys.stdout)
-            for cmd in cmds:
-                runner.run(cmd, cwd=source_dir)
-            runner.close()
-            log.status_pass('OK')
+        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
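+        # Take the file lock so that only one process builds U-Boot when tests
+        # run in parallel; later processes see build.done and skip the build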
+        with filelock.FileLock(os.path.join(build_dir, 'build.lock')):
+            build_done_file = Path(build_dir) / 'build.done'
+            if (not worker_id or worker_id == 'master' or
+                not build_done_file.exists()):
+                run_build(config, source_dir, build_dir, board_type, log)
+                build_done_file.touch()
 
     class ArbitraryAttributeContainer(object):
         pass
@@ -197,7 +230,7 @@
     else:
         parse_config('include/autoconf.mk')
 
-    ubconfig.test_py_dir = test_py_dir
+    ubconfig.test_py_dir = TEST_PY_DIR
     ubconfig.source_dir = source_dir
     ubconfig.build_dir = build_dir
     ubconfig.result_dir = result_dir
@@ -521,6 +554,22 @@
         if not tool_is_in_path(tool):
             pytest.skip('tool "%s" not in $PATH' % tool)
 
+def setup_singlethread(item):
+    """Process any 'singlethread' marker for a test.
+
+    Skip this test if running in parallel.
+
+    Args:
+        item: The pytest test item.
+
+    Returns:
+        Nothing.
+    """
+    for single in item.iter_markers('singlethread'):
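+        # PYTEST_XDIST_WORKER is only set in pytest-xdist worker processes, so
+        # tests are never skipped when running serially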
+        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
+        if worker_id and worker_id != 'master':
+            pytest.skip('must run single-threaded')
+
 def start_test_section(item):
     anchors[item.name] = log.start_section(item.name)
 
@@ -541,6 +590,7 @@
     setup_boardspec(item)
     setup_buildconfigspec(item)
     setup_requiredtool(item)
+    setup_singlethread(item)
 
 def pytest_runtest_protocol(item, nextitem):
     """pytest hook: Called to execute a test.
diff --git a/test/py/pytest.ini b/test/py/pytest.ini
index e93d010..26d83f8 100644
--- a/test/py/pytest.ini
+++ b/test/py/pytest.ini
@@ -11,3 +11,4 @@
     notbuildconfigspec: U-Boot: Describes required disabled Kconfig options.
     requiredtool: U-Boot: Required host tools for a test.
     slow: U-Boot: Specific test will run slowly.
+    singlethread: U-Boot: Test cannot run in parallel.
diff --git a/test/py/requirements.txt b/test/py/requirements.txt
index ead92ed..1bf77b5 100644
--- a/test/py/requirements.txt
+++ b/test/py/requirements.txt
@@ -2,6 +2,7 @@
 attrs==19.3.0
 coverage==4.5.4
 extras==1.0.0
+filelock==3.0.12
 fixtures==3.0.0
 importlib-metadata==0.23
 linecache2==1.0.0
@@ -15,6 +16,7 @@
 pygit2==1.9.2
 pyparsing==2.4.2
 pytest==6.2.5
+pytest-xdist==2.5.0
 python-mimeparse==1.6.0
 python-subunit==1.3.0
 requests==2.25.1
diff --git a/test/py/tests/test_bind.py b/test/py/tests/test_bind.py
index c90c54d..1376ab5 100644
--- a/test/py/tests/test_bind.py
+++ b/test/py/tests/test_bind.py
@@ -119,6 +119,7 @@
 
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.buildconfigspec('cmd_bind')
+@pytest.mark.singlethread
 def test_bind_unbind_with_uclass(u_boot_console):
     #bind /bind-test
     response = u_boot_console.run_command('bind  /bind-test simple_bus')
diff --git a/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py
index 75a6e7c..1bb59d8 100644
--- a/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py
+++ b/test/py/tests/test_efi_bootmgr/test_efi_bootmgr.py
@@ -7,6 +7,7 @@
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.buildconfigspec('cmd_efidebug')
 @pytest.mark.buildconfigspec('cmd_bootefi_bootmgr')
+@pytest.mark.singlethread
 def test_efi_bootmgr(u_boot_console, efi_bootmgr_data):
     """ Unit test for UEFI bootmanager
     The efidebug command is used to set up UEFI load options.
diff --git a/test/py/tests/test_fit_ecdsa.py b/test/py/tests/test_fit_ecdsa.py
index 87b6081..cc6c0c4 100644
--- a/test/py/tests/test_fit_ecdsa.py
+++ b/test/py/tests/test_fit_ecdsa.py
@@ -10,6 +10,7 @@
 This test doesn't run the sandbox. It only checks the host tool 'mkimage'
 """
 
+import os
 import pytest
 import u_boot_utils as util
 from Cryptodome.Hash import SHA256
@@ -84,7 +85,8 @@
     cons = u_boot_console
     mkimage = cons.config.build_dir + '/tools/mkimage'
     datadir = cons.config.source_dir + '/test/py/tests/vboot/'
-    tempdir = cons.config.result_dir
+    tempdir = os.path.join(cons.config.result_dir, 'ecdsa')
+    os.makedirs(tempdir, exist_ok=True)
     key_file = f'{tempdir}/ecdsa-test-key.pem'
     fit_file = f'{tempdir}/test.fit'
     dtc('sandbox-kernel.dts')
diff --git a/test/py/tests/test_fit_hashes.py b/test/py/tests/test_fit_hashes.py
index e228ea9..4891e77 100644
--- a/test/py/tests/test_fit_hashes.py
+++ b/test/py/tests/test_fit_hashes.py
@@ -10,6 +10,7 @@
 This test doesn't run the sandbox. It only checks the host tool 'mkimage'
 """
 
+import os
 import pytest
 import u_boot_utils as util
 
@@ -93,7 +94,9 @@
     cons = u_boot_console
     mkimage = cons.config.build_dir + '/tools/mkimage'
     datadir = cons.config.source_dir + '/test/py/tests/vboot/'
-    tempdir = cons.config.result_dir
+    tempdir = os.path.join(cons.config.result_dir, 'hashes')
+    os.makedirs(tempdir, exist_ok=True)
+
     fit_file = f'{tempdir}/test.fit'
     dtc('sandbox-kernel.dts')
 
diff --git a/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py
index 9eb00d6..527a556 100644
--- a/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py
+++ b/test/py/tests/test_fs/test_squashfs/test_sqfs_ls.py
@@ -105,6 +105,7 @@
 @pytest.mark.buildconfigspec('cmd_squashfs')
 @pytest.mark.buildconfigspec('fs_squashfs')
 @pytest.mark.requiredtool('mksquashfs')
+@pytest.mark.singlethread
 def test_sqfs_ls(u_boot_console):
     """ Executes the sqfsls test suite.
 
diff --git a/test/py/tests/test_gpio.py b/test/py/tests/test_gpio.py
index fa0af5f..0af186f 100644
--- a/test/py/tests/test_gpio.py
+++ b/test/py/tests/test_gpio.py
@@ -51,6 +51,7 @@
 def test_gpio_read(u_boot_console):
     """Test that gpio read correctly sets the variable to the value of a gpio pin."""
 
+    u_boot_console.run_command('gpio clear 0')
     response = u_boot_console.run_command('gpio read var 0; echo val:$var,rc:$?')
     expected_response = 'val:0,rc:0'
     assert(expected_response in response)
diff --git a/test/py/tests/test_gpt.py b/test/py/tests/test_gpt.py
index f707d9f..cb44e1d 100644
--- a/test/py/tests/test_gpt.py
+++ b/test/py/tests/test_gpt.py
@@ -13,6 +13,9 @@
 the test.
 """
 
+# Mark all tests here as slow
+pytestmark = pytest.mark.slow
+
 class GptTestDiskImage(object):
     """Disk Image used by the GPT tests."""
 
diff --git a/test/py/tests/test_pinmux.py b/test/py/tests/test_pinmux.py
index b3ae2ab..794994e 100644
--- a/test/py/tests/test_pinmux.py
+++ b/test/py/tests/test_pinmux.py
@@ -68,6 +68,7 @@
 def test_pinmux_status(u_boot_console):
     """Test that 'pinmux status' displays selected pincontroller's pin
     muxing descriptions."""
+    u_boot_console.run_command('pinmux dev pinctrl')
     output = u_boot_console.run_command('pinmux status')
 
     assert (not 'pinctrl-gpio:' in output)
diff --git a/test/py/tests/test_vboot.py b/test/py/tests/test_vboot.py
index 040147d..e3e7ca4 100644
--- a/test/py/tests/test_vboot.py
+++ b/test/py/tests/test_vboot.py
@@ -42,7 +42,7 @@
 
 # Only run the full suite on a few combinations, since it doesn't add any more
 # test coverage.
-TESTDATA = [
+TESTDATA_IN = [
     ['sha1-basic', 'sha1', '', None, False, True, False, False],
     ['sha1-pad', 'sha1', '', '-E -p 0x10000', False, False, False, False],
     ['sha1-pss', 'sha1', '-pss', None, False, False, False, False],
@@ -60,6 +60,10 @@
     ['sha256-global-sign-pss', 'sha256', '-pss', '', False, False, False, True],
 ]
 
+# Mark all but the first test as slow, so they are not run with '-k not slow'
+TESTDATA = [TESTDATA_IN[0]]
+TESTDATA += [pytest.param(*v, marks=pytest.mark.slow) for v in TESTDATA_IN[1:]]
+
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.buildconfigspec('fit_signature')
 @pytest.mark.requiredtool('dtc')
diff --git a/test/run b/test/run
index 869406c..810b47e 100755
--- a/test/run
+++ b/test/run
@@ -13,25 +13,47 @@
 	[ $? -ne 0 ] && failures=$((failures+1))
 }
 
-# SKip slow tests if requested
-[ "$1" == "quick" ] && mark_expr="not slow"
-[ "$1" == "quick" ] && skip=--skip-net-tests
+# Select test attributes
+ut_mark_expr=test_ut
+if [ "$1" = "quick" ]; then
+	mark_expr="not slow"
+	ut_mark_expr="test_ut and not slow"
+	skip=--skip-net-tests
+fi
+
 [ "$1" == "tools" ] && tools_only=y
 
+if [ "$1" = "parallel" ]; then
+	if ! echo 'import xdist' | python3 2>/dev/null; then
+		echo "Please install python3-pytest-xdist - see doc/develop/py_testing.rst"
+		exit 1
+	fi
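+	# Use at most 16 processes, since there are under 1000 tests and the
+	# setup time dominates with higher parallelism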
+	jobs="$(($(nproc) > 16 ? 16 : $(nproc)))"
+	para="-n${jobs} -q"
+	prompt="Building and..."
+	skip=--skip-net-tests
+	mark_expr="not slow and not bootstd and not spi_flash"
+	ut_mark_expr="test_ut and not slow and not bootstd and not spi_flash"
+	echo "Note: test log is garbled with parallel tests"
+fi
+
 failures=0
 
 if [ -z "$tools_only" ]; then
 	# Run all tests that the standard sandbox build can support
-	run_test "sandbox" ./test/py/test.py --bd sandbox --build \
-		-m "${mark_expr}"
+	echo "${prompt}"
+	run_test "sandbox" ./test/py/test.py --bd sandbox --build ${para} \
+		-k "${mark_expr}"
 fi
 
 # Run tests which require sandbox_spl
-run_test "sandbox_spl" ./test/py/test.py --bd sandbox_spl --build \
+echo "${prompt}"
+run_test "sandbox_spl" ./test/py/test.py --bd sandbox_spl --build ${para} \
 		-k 'test_ofplatdata or test_handoff or test_spl'
 
 # Run the sane tests with sandbox_noinst (i.e. without OF_PLATDATA_INST)
-run_test "sandbox_spl" ./test/py/test.py --bd sandbox_noinst --build \
+echo "${prompt}"
+run_test "sandbox_spl" ./test/py/test.py --bd sandbox_noinst --build ${para} \
 		-k 'test_ofplatdata or test_handoff or test_spl'
 
 if [ -z "$tools_only" ]; then
@@ -39,8 +61,9 @@
 	# build which does not enable CONFIG_OF_LIVE for the live device tree, so we can
 	# check that functionality is the same. The standard sandbox build (above) uses
 	# CONFIG_OF_LIVE.
+	echo "${prompt}"
 	run_test "sandbox_flattree" ./test/py/test.py --bd sandbox_flattree \
-		--build -k test_ut
+		${para} --build -k "${ut_mark_expr}"
 fi
 
 # Set up a path to dtc (device-tree compiler) and libfdt.py, a library it
@@ -61,10 +84,14 @@
 # This needs you to set up Python test coverage tools.
 # To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
 #   $ sudo apt-get install python-pytest python-coverage
-export PATH=$PATH:${TOOLS_DIR}
-run_test "binman code coverage" ./tools/binman/binman test -T
-run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
-run_test "fdt code coverage" ./tools/dtoc/test_fdt -T
+
+# Code-coverage tests cannot run in parallel, so skip them in that case
+if [ -z "${para}" ]; then
+	export PATH=$PATH:${TOOLS_DIR}
+	run_test "binman code coverage" ./tools/binman/binman test -T
+	run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
+	run_test "fdt code coverage" ./tools/dtoc/test_fdt -T
+fi
 
 if [ $failures == 0 ]; then
 	echo "Tests passed!"
diff --git a/tools/dtoc/test_fdt.py b/tools/dtoc/test_fdt.py
index 8a990b8..a3e36ea 100755
--- a/tools/dtoc/test_fdt.py
+++ b/tools/dtoc/test_fdt.py
@@ -851,4 +851,3 @@
 
 if __name__ == '__main__':
     sys.exit(main())
-sys.exit(1)