blob: 304e93164aae0218eae29363567c78646baa2286 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001# SPDX-License-Identifier: GPL-2.0
Stephen Warren10e50632016-01-15 11:15:24 -07002# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Stephen Warren10e50632016-01-15 11:15:24 -07004
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
9# - Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
15import atexit
Tom Rini6a990412019-10-24 11:59:21 -040016import configparser
Stephen Warren10e50632016-01-15 11:15:24 -070017import errno
Simon Glass62b92f82022-08-06 17:51:57 -060018import filelock
Tom Rini6a990412019-10-24 11:59:21 -040019import io
Stephen Warren10e50632016-01-15 11:15:24 -070020import os
21import os.path
Simon Glass62b92f82022-08-06 17:51:57 -060022from pathlib import Path
Stephen Warren10e50632016-01-15 11:15:24 -070023import pytest
Stephen Warren770fe172016-02-08 14:44:16 -070024import re
Tom Rini6a990412019-10-24 11:59:21 -040025from _pytest.runner import runtestprotocol
Stephen Warren10e50632016-01-15 11:15:24 -070026import sys
27
28# Globals: The HTML log file, and the connection to the U-Boot console.
29log = None
30console = None
31
Simon Glass62b92f82022-08-06 17:51:57 -060032TEST_PY_DIR = os.path.dirname(os.path.abspath(__file__))
33
def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused due to already extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    # exist_ok=True gives exactly the old EEXIST-and-isdir behaviour without
    # the racy manual errno check: an existing directory is silently accepted,
    # while a pre-existing non-directory still raises.
    os.makedirs(path, exist_ok=True)
54
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    # Each entry is (option strings, keyword arguments) for one option.
    custom_options = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--buildman',),
         dict(default=False, action='store_true',
              help='Use buildman to build U-Boot (assuming --build is given)')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, e.g. '
                   'localhost:1234')),
    )
    for flags, kwargs in custom_options:
        parser.addoption(*flags, **kwargs)
def run_build(config, source_dir, build_dir, board_type, log):
    """Build U-Boot, via buildman or directly with make.

    Args:
        config: The pytest configuration.
        source_dir (str): Directory containing source code
        build_dir (str): Directory to build in
        board_type (str): board_type parameter (e.g. 'sandbox')
        log (Logfile): Log file to use

    Returns:
        Nothing.
    """
    in_tree = build_dir == source_dir
    if config.getoption('buildman'):
        name = 'buildman'
        # -i builds in-tree; otherwise direct output to build_dir (-o -w)
        dest_args = ['-i'] if in_tree else ['-o', build_dir, '-w']
        cmds = (['buildman', '--board', board_type] + dest_args,)
    else:
        name = 'make'
        o_opt = '' if in_tree else 'O=%s' % build_dir
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
        )

    with log.section(name):
        runner = log.get_runner(name, sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()
        log.status_pass('OK')
def pytest_xdist_setupnodes(config, specs):
    """pytest-xdist hook: delete any stale 'done' marker from a prior build.

    Runs on the controller before worker nodes start, so workers never see a
    build.done file left over from a previous run.

    Args:
        config: The pytest configuration.
        specs: The worker specs (unused).

    Returns:
        Nothing.
    """
    global build_done_file
    board_type = config.getoption('board_type')
    build_dir = config.getoption('build_dir')
    if not build_dir:
        # Default to a per-board build directory next to the source tree.
        source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))
        build_dir = source_dir + '/build-' + board_type
    build_done_file = Path(build_dir) / 'build.done'
    if build_done_file.exists():
        build_done_file.unlink()
129
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Sets up the module globals (log, console, ubconfig) used by the rest of
    this file: resolves directories, optionally builds U-Boot, loads board
    configuration modules and the U-Boot build configuration, and creates
    the console connection object.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        # Kconfig output is key=value lines with no section header; prepend
        # a dummy [root] section so configparser will accept it.
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    # test/py lives two levels below the top of the source tree.
    source_dir = os.path.dirname(os.path.dirname(TEST_PY_DIR))

    # '-' is not valid in Python module names, so board type/identity are
    # normalized before being used to locate configuration modules below.
    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    # Imported here (not at file top) so the path/option setup above happens
    # first; the log must exist before any build output is captured.
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        # Under pytest-xdist, only one process may build: the lock
        # serializes workers, and build.done marks the build as complete so
        # later workers skip it. pytest_xdist_setupnodes() removed any stale
        # marker before workers started.
        worker_id = os.environ.get("PYTEST_XDIST_WORKER")
        with filelock.FileLock(os.path.join(build_dir, 'build.lock')):
            build_done_file = Path(build_dir) / 'build.done'
            if (not worker_id or worker_id == 'master' or
                    not build_done_file.exists()):
                run_build(config, source_dir, build_dir, board_type, log)
                build_done_file.touch()

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Optional per-board / per-board-instance Python config modules; each is
    # simply skipped if not present on sys.path.
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = TEST_PY_DIR
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    # Export key settings to child processes (e.g. U-Boot or helper tools
    # spawned by tests) via U_BOOT_* environment variables.
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    # Sandbox runs U-Boot as a local process; all other boards are driven
    # over external exec/attach scripts.
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
261
Simon Glass23300b42021-10-23 17:26:11 -0600262re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
Simon Glassed298be2020-10-25 20:38:31 -0600263def generate_ut_subtest(metafunc, fixture_name, sym_path):
Stephen Warren770fe172016-02-08 14:44:16 -0700264 """Provide parametrization for a ut_subtest fixture.
265
266 Determines the set of unit tests built into a U-Boot binary by parsing the
267 list of symbols generated by the build process. Provides this information
268 to test functions by parameterizing their ut_subtest fixture parameter.
269
270 Args:
271 metafunc: The pytest test function.
272 fixture_name: The fixture name to test.
Simon Glassed298be2020-10-25 20:38:31 -0600273 sym_path: Relative path to the symbol file with preceding '/'
274 (e.g. '/u-boot.sym')
Stephen Warren770fe172016-02-08 14:44:16 -0700275
276 Returns:
277 Nothing.
278 """
Simon Glassed298be2020-10-25 20:38:31 -0600279 fn = console.config.build_dir + sym_path
Stephen Warren770fe172016-02-08 14:44:16 -0700280 try:
281 with open(fn, 'rt') as f:
282 lines = f.readlines()
283 except:
284 lines = []
285 lines.sort()
286
287 vals = []
288 for l in lines:
289 m = re_ut_test_list.search(l)
290 if not m:
291 continue
292 vals.append(m.group(1) + ' ' + m.group(2))
293
294 ids = ['ut_' + s.replace(' ', '_') for s in vals]
295 metafunc.parametrize(fixture_name, vals, ids=ids)
296
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            # val is not a mapping, or supplies no explicit id; synthesize
            # one. (Was a bare 'except:', which could mask real errors.)
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
340
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fixture in metafunc.fixturenames:
        if fixture == 'ut_subtest':
            generate_ut_subtest(metafunc, fixture, '/u-boot.sym')
        else:
            m_subtest = re.match('ut_(.)pl_subtest', fixture)
            if m_subtest:
                # e.g. ut_spl_subtest -> /spl/u-boot-spl.sym
                spl_name = m_subtest.group(1)
                generate_ut_subtest(
                    metafunc, fixture, f'/{spl_name}pl/u-boot-{spl_name}pl.sym')
            else:
                generate_config(metafunc, fixture)
Stephen Warren10e50632016-01-15 11:15:24 -0700365
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value: the session-wide multiplexed log object.
    """
    return console.log
378
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value: the session-wide U-Boot configuration container.
    """
    return console.config
391
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value: the global console object, with U-Boot guaranteed
        to be running.
    """
    # Re-spawn U-Boot if a previous test's failure cleanup tore it down.
    console.ensure_spawned()
    return console
405
# Maps test name -> anchor of that test's section in the HTML log, so the
# final status report can link to each test's output.
anchors = {}
# Result buckets for the final status report. Every collected test starts in
# tests_not_run and is moved into exactly one of the other lists by
# pytest_runtest_protocol() once its outcome is known.
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []
Stephen Warren10e50632016-01-15 11:15:24 -0700414
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    Registers every collected test as "not run", so the final status report
    can flag any test that never executed (e.g. due to an early abort).

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """
    tests_not_run.append(item.name)
Stephen Warren10e50632016-01-15 11:15:24 -0700429
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """
    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            # One (bucket, summary format, log method name) row per result
            # category, emitted in the original fixed report order. Each
            # non-empty bucket logs a summary line, then one line per test
            # linking back to that test's log section.
            categories = (
                (tests_warning, '%d passed with warning', 'status_warning'),
                (tests_skipped, '%d skipped', 'status_skipped'),
                (tests_xpassed, '%d xpass', 'status_xpass'),
                (tests_xfailed, '%d xfail', 'status_xfail'),
                (tests_failed, '%d failed', 'status_fail'),
                (tests_not_run, '%d not run', 'status_fail'),
            )
            for bucket, summary_fmt, method_name in categories:
                if not bucket:
                    continue
                emit = getattr(log, method_name)
                emit(summary_fmt % len(bucket))
                for test in bucket:
                    emit('... ' + test, anchors.get(test, None))
        log.close()
atexit.register(cleanup)
481
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    required = []
    for marker in item.iter_markers('boardspec'):
        spec = marker.args[0]
        if spec.startswith('!'):
            # '!board' excludes that board type outright.
            if ubconfig.board_type == spec[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required.append(spec)
    # Positive specs form an allow-list: the current board must be on it.
    if required and ubconfig.board_type not in required:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
Stephen Warren10e50632016-01-15 11:15:24 -0700507
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    def enabled(option):
        # Kconfig options are stored lower-cased with a 'config_' prefix.
        return ubconfig.buildconfig.get('config_' + option.lower(), None)

    for marker in item.iter_markers('buildconfigspec'):
        option = marker.args[0]
        if not enabled(option):
            pytest.skip('.config feature "%s" not enabled' % option.lower())
    for marker in item.iter_markers('notbuildconfigspec'):
        option = marker.args[0]
        if enabled(option):
            pytest.skip('.config feature "%s" enabled' % option.lower())
Stephen Warren10e50632016-01-15 11:15:24 -0700530
def tool_is_in_path(tool):
    """Return True if an executable named 'tool' exists on $PATH."""
    return any(
        os.path.isfile(os.path.join(directory, tool)) and
        os.access(os.path.join(directory, tool), os.X_OK)
        for directory in os.environ["PATH"].split(os.pathsep))
537
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    for marker in item.iter_markers('requiredtool'):
        tool = marker.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
556
def setup_singlethread(item):
    """Process any 'singlethread' marker for a test.

    Skip this test if running in parallel (i.e. on a pytest-xdist worker).

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    # PYTEST_XDIST_WORKER is only set on xdist workers; 'master' means a
    # non-parallel controller, so only real workers trigger the skip.
    worker_id = os.environ.get("PYTEST_XDIST_WORKER")
    if not worker_id or worker_id == 'master':
        return
    for _marker in item.iter_markers('singlethread'):
        pytest.skip('must run single-threaded')
572
def start_test_section(item):
    """Open a log section for a test and remember its anchor.

    The anchor is saved so the end-of-run status report can link back to
    this test's output in the HTML log.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    anchors[item.name] = log.start_section(item.name)
575
Stephen Warren10e50632016-01-15 11:15:24 -0700576def pytest_runtest_setup(item):
Stephen Warren75e731e2016-01-26 13:41:30 -0700577 """pytest hook: Configure (set up) a test item.
Stephen Warren10e50632016-01-15 11:15:24 -0700578
579 Called once for each test to perform any custom configuration. This hook
580 is used to skip the test if certain conditions apply.
581
582 Args:
583 item: The pytest test item.
584
585 Returns:
586 Nothing.
Stephen Warren75e731e2016-01-26 13:41:30 -0700587 """
Stephen Warren10e50632016-01-15 11:15:24 -0700588
Stephen Warren3e3d1432016-10-17 17:25:52 -0600589 start_test_section(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700590 setup_boardspec(item)
591 setup_buildconfigspec(item)
Stephen Warren2079db32017-09-18 11:11:49 -0600592 setup_requiredtool(item)
Simon Glass54ef2ca2022-08-06 17:51:47 -0600593 setup_singlethread(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700594
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    # Discard any warning state left over from earlier activity, so the
    # check after the test reflects only this test's warnings.
    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    # Classify the overall result. Default to pass (or pass-with-warning),
    # then let the first failed/skipped report override it.
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            # 'wasxfail' + failed means the test unexpectedly passed (xpass).
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # 'wasxfail' + skipped means an expected failure (xfail).
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            # Plain skip: keep scanning; a later report may still fail.
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    # On failure/xfail, flush any pending console output into the log
    # before recording status, so the log section is complete.
    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    # Restart U-Boot after a failure so the next test gets a clean console.
    if failure_cleanup:
        console.cleanup_spawn()

    return True