blob: 906387db1c84fb9289f09d539516ae2120cc8b7d [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001# SPDX-License-Identifier: GPL-2.0
Stephen Warren10e50632016-01-15 11:15:24 -07002# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Stephen Warren10e50632016-01-15 11:15:24 -07004
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
9# Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
import atexit
import configparser
import errno
import io
import os
import os.path
import re
import shutil
import sys

import pytest
from _pytest.runner import runtestprotocol
25
# Globals: The HTML log file, and the connection to the U-Boot console.
# Both stay None until pytest_configure() initializes them; the fixtures
# and hooks below consume them.
log = None
console = None
29
def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused due to already extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    # exist_ok=True matches the previous manual errno.EEXIST handling: an
    # already-existing directory is ignored, but a path that exists as a
    # non-directory still raises (FileExistsError, an OSError subclass).
    os.makedirs(path, exist_ok=True)
50
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    # (option strings, keyword arguments) for each custom option.
    option_table = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--buildman',),
         dict(default=False, action='store_true',
              help='Use buildman to build U-Boot (assuming --build is given)')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, e.g. '
                   'localhost:1234')),
    )
    for names, kwargs in option_table:
        parser.addoption(*names, **kwargs)
Stephen Warren10e50632016-01-15 11:15:24 -070078
def run_build(config, source_dir, build_dir, board_type, log):
    """run_build: Build U-Boot

    Args:
        config: The pytest configuration.
        source_dir (str): Directory containing source code
        build_dir (str): Directory to build in
        board_type (str): board_type parameter (e.g. 'sandbox')
        log (Logfile): Log file to use
    """
    if config.getoption('buildman'):
        if build_dir != source_dir:
            # Out-of-tree build: direct buildman's output to build_dir.
            dest_args = ['-o', build_dir, '-w']
        else:
            # In-tree build.
            dest_args = ['-i']
        cmds = (['buildman', '--board', board_type] + dest_args,)
        name = 'buildman'
    else:
        # For an in-tree build, pass no O= option at all. Previously an
        # empty-string argument was passed in that case, which GNU make
        # rejects ('empty string invalid as file name').
        if build_dir != source_dir:
            o_opt = ['O=%s' % build_dir]
        else:
            o_opt = []
        cmds = (
            ['make'] + o_opt + ['-s', board_type + '_defconfig'],
            ['make'] + o_opt + ['-s', '-j{}'.format(os.cpu_count())],
        )
        name = 'make'

    with log.section(name):
        runner = log.get_runner(name, sys.stdout)
        for cmd in cmds:
            runner.run(cmd, cwd=source_dir)
        runner.close()
        log.status_pass('OK')
113
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises:
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        # Kconfig output is close enough to INI syntax that configparser can
        # read it, once a dummy section header is prepended.
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    # This file lives in test/py/ within the source tree, so the source root
    # is two directory levels up.
    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    # '-' is not valid in Python module names, so the board type/identity are
    # mangled before being used to locate the per-board config modules below.
    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    # Local import: multiplexed_log lives next to this file and is only
    # needed once configuration begins.
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        run_build(config, source_dir, build_dir, board_type, log)

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Optional per-board / per-board-instance configuration modules; any of
    # them may legitimately be absent, hence ImportError is ignored.
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    # Export the configuration to child processes (e.g. hook scripts) as
    # U_BOOT_* environment variables.
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    # Sandbox boards run U-Boot as a local subprocess; all other boards are
    # driven through external exec/attach hook scripts.
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
240
# Matches one line of the symbol file scanned by generate_ut_subtest();
# group(1) is the unit-test suite name, group(2) the individual test name.
re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name, sym_path):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.
        sym_path: Relative path to the symbol file with preceding '/'
            (e.g. '/u-boot.sym')

    Returns:
        Nothing.
    """
    fn = console.config.build_dir + sym_path
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except OSError:
        # A missing or unreadable symbol file simply means no subtests for
        # this binary. Only OSError is caught (previously a bare except:),
        # so programming errors are no longer silently swallowed.
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
275
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        # Prefer a value-supplied 'fixture_id'; fall back to a generated
        # name when the value is not a dict or lacks that key (previously a
        # bare except: which also hid real errors).
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
319
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    for fixture in metafunc.fixturenames:
        if fixture == 'ut_subtest':
            # Main U-Boot binary unit tests.
            generate_ut_subtest(metafunc, fixture, '/u-boot.sym')
        else:
            match = re.match('ut_(.)pl_subtest', fixture)
            if match:
                # SPL/TPL/VPL unit tests, keyed by the phase letter.
                phase = match.group(1)
                generate_ut_subtest(
                    metafunc, fixture, f'/{phase}pl/u-boot-{phase}pl.sym')
            else:
                # Everything else may be an env__/brd__ config fixture.
                generate_config(metafunc, fixture)
Stephen Warren10e50632016-01-15 11:15:24 -0700344
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Session-scoped fixture exposing the HTML log file object.

    Args:
        request: The pytest request.

    Returns:
        The log object held by the global console.
    """
    return console.log
357
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Session-scoped fixture exposing the test configuration.

    Args:
        request: The pytest request.

    Returns:
        The config object held by the global console.
    """
    return console.config
370
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Per-test fixture providing the U-Boot console connection.

    Makes sure the console has been spawned before handing it to the test.

    Args:
        request: The pytest request.

    Returns:
        The global console connection object.
    """
    console.ensure_spawned()
    return console
384
# Mapping from test name to the log-section anchor created for that test.
anchors = {}
# Buckets of test names by outcome. Tests start in tests_not_run (see
# pytest_itemcollected) and are moved to an outcome bucket as they execute;
# cleanup() reports whatever remains in each bucket at exit.
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []
Stephen Warren10e50632016-01-15 11:15:24 -0700393
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    Every collected test is initially placed into the "not run" bucket;
    pytest_runtest_protocol() later moves it into an outcome-specific bucket,
    so anything left here at exit is reported as never having run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """
    tests_not_run.append(item.name)
Stephen Warren10e50632016-01-15 11:15:24 -0700408
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if not log:
        return
    with log.section('Status Report', 'status_report'):
        log.status_pass('%d passed' % len(tests_passed))
        # Each remaining bucket is reported only when non-empty: first a
        # summary count, then one line per test (with its log anchor),
        # using the bucket's matching status function.
        report_table = (
            (tests_warning, log.status_warning, '%d passed with warning'),
            (tests_skipped, log.status_skipped, '%d skipped'),
            (tests_xpassed, log.status_xpass, '%d xpass'),
            (tests_xfailed, log.status_xfail, '%d xfail'),
            (tests_failed, log.status_fail, '%d failed'),
            (tests_not_run, log.status_fail, '%d not run'),
        )
        for bucket, emit, summary_fmt in report_table:
            if not bucket:
                continue
            emit(summary_fmt % len(bucket))
            for test in bucket:
                emit('... ' + test, anchors.get(test, None))
    log.close()
# Registered unconditionally at import time; cleanup() itself checks whether
# log/console were ever created before touching them.
atexit.register(cleanup)
460
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    required = []
    for marker in item.iter_markers('boardspec'):
        spec = marker.args[0]
        if not spec.startswith('!'):
            # Plain entry: this board is one of the allowed set.
            required.append(spec)
        elif ubconfig.board_type == spec[1:]:
            # '!board' entry explicitly excludes the current board.
            pytest.skip('board "%s" not supported' % ubconfig.board_type)
            return
    if required and ubconfig.board_type not in required:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)
Stephen Warren10e50632016-01-15 11:15:24 -0700486
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    # Required features: skip unless CONFIG_<option> is enabled.
    for marker in item.iter_markers('buildconfigspec'):
        opt = marker.args[0].lower()
        if not ubconfig.buildconfig.get('config_' + opt, None):
            pytest.skip('.config feature "%s" not enabled' % opt)
    # Forbidden features: skip if CONFIG_<option> is enabled.
    for marker in item.iter_markers('notbuildconfigspec'):
        opt = marker.args[0].lower()
        if ubconfig.buildconfig.get('config_' + opt, None):
            pytest.skip('.config feature "%s" enabled' % opt)
Stephen Warren10e50632016-01-15 11:15:24 -0700509
def tool_is_in_path(tool):
    """Determine whether an executable tool is available via $PATH.

    Args:
        tool: Name of the executable to look for.

    Returns:
        True if the tool is found in $PATH and is executable, False otherwise.
    """

    # shutil.which() performs the same $PATH walk + X_OK check as the old
    # hand-rolled loop, and additionally copes with an unset $PATH.
    return shutil.which(tool) is not None
516
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    for marker in item.iter_markers('requiredtool'):
        name = marker.args[0]
        if not tool_is_in_path(name):
            pytest.skip('tool "%s" not in $PATH' % name)
535
def setup_singlethread(item):
    """Process any 'singlethread' marker for a test.

    Skip this test if running in parallel.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """
    for _marker in item.iter_markers('singlethread'):
        # pytest-xdist workers set PYTEST_XDIST_WORKER; 'master' means we
        # are not actually running in a parallel worker.
        worker = os.environ.get("PYTEST_XDIST_WORKER")
        if worker and worker != 'master':
            pytest.skip('must run single-threaded')
551
def start_test_section(item):
    """Open a log section for 'item' and record its anchor for reporting."""
    anchors[item.name] = log.start_section(item.name)
554
def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    # Open the log section first so any skip message lands inside it.
    start_test_section(item)
    # Each checker may call pytest.skip() when its marker rules out this run.
    for check in (setup_boardspec, setup_buildconfigspec, setup_requiredtool,
                  setup_singlethread):
        check(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700573
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        True, indicating to pytest that this hook performed the runtest
        protocol for the item.
    """

    # Discard any warning state left over from a previous test, so the
    # post-run check below only sees warnings emitted by this test.
    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    # Classify the overall result: default to pass (or pass-with-warning),
    # then let the first failed/skipped report override it.
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            # 'failed' + wasxfail means the test unexpectedly passed.
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # 'skipped' + wasxfail is an expected failure; a plain skip does
            # not break, so a later report may still override it.
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    # On (x)failure, capture any pending console output into the log before
    # reporting, then move the test out of the "not run" bucket.
    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    # Restart U-Boot after a real failure so the next test starts clean.
    if failure_cleanup:
        console.cleanup_spawn()

    return True