blob: 16e445cd8eeae62a82397d0a5f13700db3034248 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001# SPDX-License-Identifier: GPL-2.0
Stephen Warren10e50632016-01-15 11:15:24 -07002# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Stephen Warren10e50632016-01-15 11:15:24 -07004
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
import atexit
import configparser
import errno
import io
import os
import os.path
import re
import shutil
import sys

import pytest
from _pytest.runner import runtestprotocol
25
# Globals: The HTML log file, and the connection to the U-Boot console.
# Both are populated by pytest_configure() at session startup and consumed
# by the fixtures and result-tracking hooks below.
log = None      # multiplexed_log.Logfile instance (None until configured)
console = None  # U-Boot console object (sandbox or exec-attach flavor)
29
def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused due to already extant directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    # exist_ok=True reproduces the old manual EEXIST/isdir() check: it
    # succeeds silently when the directory already exists, but still raises
    # if the path exists as a non-directory or creation otherwise fails.
    os.makedirs(path, exist_ok=True)
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    # Each entry is (option name(s), keyword arguments for addoption()).
    option_specs = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--buildman',),
         dict(default=False, action='store_true',
              help='Use buildman to build U-Boot (assuming --build is given)')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, e.g. '
                   'localhost:1234')),
    )
    for names, kwargs in option_specs:
        parser.addoption(*names, **kwargs)
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Sets up the module-level globals (log, console, ubconfig): determines
    the relevant directories, optionally builds U-Boot, loads board/env
    configuration modules and the build-time .config/autoconf.mk values,
    and creates the console object matching the board type.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """
    def parse_config(conf_file):
        """Parse a config file, loading it into the ubconfig container

        Args:
            conf_file: Filename to load (within build_dir)

        Raises:
            Exception if the file does not exist
        """
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        # Kconfig output is almost INI syntax, but has no section header;
        # prepend a dummy [root] section so configparser accepts it.
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = io.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.read_file(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    global log
    global console
    global ubconfig

    # This file lives in <source>/test/py, so the source tree is two
    # directories up from here.
    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    # '-' is not valid in Python module names, so board config modules use
    # '_' instead.
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    # gdbserver only makes sense for a local sandbox process, not real HW.
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if config.getoption('buildman'):
            if build_dir != source_dir:
                dest_args = ['-o', build_dir, '-w']
            else:
                # In-tree build.
                dest_args = ['-i']
            cmds = (['buildman', '--board', board_type] + dest_args,)
            name = 'buildman'
        else:
            if build_dir != source_dir:
                o_opt = 'O=%s' % build_dir
            else:
                o_opt = ''
            cmds = (
                ['make', o_opt, '-s', board_type + '_defconfig'],
                ['make', o_opt, '-s', '-j{}'.format(os.cpu_count())],
            )
            name = 'make'

        # Run the build command(s), teeing output into the HTML log.
        with log.section(name):
            runner = log.get_runner(name, sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Board/environment config modules are optional; later entries override
    # earlier ones since they update the same dict.
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            # Missing config modules are fine; use defaults.
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    # buildman -k puts autoconf.mk in the rootdir, so handle this as well
    # as the standard U-Boot build which leaves it in include/autoconf.mk
    parse_config('.config')
    if os.path.exists(build_dir + '/' + 'autoconf.mk'):
        parse_config('autoconf.mk')
    else:
        parse_config('include/autoconf.mk')

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    # Export key settings so sub-processes (e.g. hook scripts) can see them.
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
Simon Glass23300b42021-10-23 17:26:11 -0600229re_ut_test_list = re.compile(r'[^a-zA-Z0-9_]_u_boot_list_2_ut_(.*)_test_2_(.*)\s*$')
Simon Glassed298be2020-10-25 20:38:31 -0600230def generate_ut_subtest(metafunc, fixture_name, sym_path):
Stephen Warren770fe172016-02-08 14:44:16 -0700231 """Provide parametrization for a ut_subtest fixture.
232
233 Determines the set of unit tests built into a U-Boot binary by parsing the
234 list of symbols generated by the build process. Provides this information
235 to test functions by parameterizing their ut_subtest fixture parameter.
236
237 Args:
238 metafunc: The pytest test function.
239 fixture_name: The fixture name to test.
Simon Glassed298be2020-10-25 20:38:31 -0600240 sym_path: Relative path to the symbol file with preceding '/'
241 (e.g. '/u-boot.sym')
Stephen Warren770fe172016-02-08 14:44:16 -0700242
243 Returns:
244 Nothing.
245 """
Simon Glassed298be2020-10-25 20:38:31 -0600246 fn = console.config.build_dir + sym_path
Stephen Warren770fe172016-02-08 14:44:16 -0700247 try:
248 with open(fn, 'rt') as f:
249 lines = f.readlines()
250 except:
251 lines = []
252 lines.sort()
253
254 vals = []
255 for l in lines:
256 m = re_ut_test_list.search(l)
257 if not m:
258 continue
259 vals.append(m.group(1) + ' ' + m.group(2))
260
261 ids = ['ut_' + s.replace(' ', '_') for s in vals]
262 metafunc.parametrize(fixture_name, vals, ids=ids)
263
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        # A value may carry its own human-readable ID; otherwise fall back
        # to a positional name. Non-dict values raise TypeError and dicts
        # lacking the key raise KeyError -- both mean "no explicit ID".
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """
    # The two unit-test fixtures map directly to a symbol file; everything
    # else is handled by the generic brd__/env__ lookup.
    ut_sym_paths = {
        'ut_subtest': '/u-boot.sym',
        'ut_spl_subtest': '/spl/u-boot-spl.sym',
    }
    for fixture_name in metafunc.fixturenames:
        sym_path = ut_sym_paths.get(fixture_name)
        if sym_path:
            generate_ut_subtest(metafunc, fixture_name, sym_path)
            continue
        generate_config(metafunc, fixture_name)
Stefan Brüns364ea872016-11-05 17:45:32 +0100330@pytest.fixture(scope='session')
331def u_boot_log(request):
332 """Generate the value of a test's log fixture.
333
334 Args:
335 request: The pytest request.
336
337 Returns:
338 The fixture value.
339 """
340
341 return console.log
342
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Session-scoped fixture exposing the U-Boot configuration container.

    Args:
        request: The pytest request.

    Returns:
        The configuration object attached to the global console.
    """

    cfg = console.config
    return cfg
Stephen Warrene1d24d02016-01-22 12:30:08 -0700356@pytest.fixture(scope='function')
Stephen Warren10e50632016-01-15 11:15:24 -0700357def u_boot_console(request):
Stephen Warren75e731e2016-01-26 13:41:30 -0700358 """Generate the value of a test's u_boot_console fixture.
Stephen Warren10e50632016-01-15 11:15:24 -0700359
360 Args:
361 request: The pytest request.
362
363 Returns:
364 The fixture value.
Stephen Warren75e731e2016-01-26 13:41:30 -0700365 """
Stephen Warren10e50632016-01-15 11:15:24 -0700366
Stephen Warrene1d24d02016-01-22 12:30:08 -0700367 console.ensure_spawned()
Stephen Warren10e50632016-01-15 11:15:24 -0700368 return console
369
Stephen Warrene3f2a502016-02-03 16:46:34 -0700370anchors = {}
Stephen Warrenaaf4e912016-02-10 13:47:37 -0700371tests_not_run = []
372tests_failed = []
373tests_xpassed = []
374tests_xfailed = []
375tests_skipped = []
Stephen Warrene27a6ae2018-02-20 12:51:55 -0700376tests_warning = []
Stephen Warrenaaf4e912016-02-10 13:47:37 -0700377tests_passed = []
Stephen Warren10e50632016-01-15 11:15:24 -0700378
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    Registers the test in the not-run bucket so the final status report can
    flag any test that was collected but never executed.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    name = item.name
    tests_not_run.append(name)
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            # Each remaining bucket is reported the same way: a count line
            # followed by one linked line per test. Table-drive it rather
            # than repeating the stanza six times.
            report_groups = (
                (tests_warning, log.status_warning, '%d passed with warning'),
                (tests_skipped, log.status_skipped, '%d skipped'),
                (tests_xpassed, log.status_xpass, '%d xpass'),
                (tests_xfailed, log.status_xfail, '%d xfail'),
                (tests_failed, log.status_fail, '%d failed'),
                (tests_not_run, log.status_fail, '%d not run'),
            )
            for tests, status_fn, count_fmt in report_groups:
                if not tests:
                    continue
                status_fn(count_fmt % len(tests))
                for test in tests:
                    # Link back to the test's log section if one was made.
                    anchor = anchors.get(test, None)
                    status_fn('... ' + test, anchor)
        log.close()
atexit.register(cleanup)
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    current_board = ubconfig.board_type
    required = []
    for marker in item.iter_markers('boardspec'):
        spec = marker.args[0]
        # A leading '!' means "any board except this one".
        if spec.startswith('!'):
            if current_board == spec[1:]:
                pytest.skip('board "%s" not supported' % current_board)
                return
        else:
            required.append(spec)
    if required and current_board not in required:
        pytest.skip('board "%s" not supported' % current_board)
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    # Required features: skip when the .config option is absent/disabled.
    for marker in item.iter_markers('buildconfigspec'):
        opt = marker.args[0].lower()
        if not ubconfig.buildconfig.get('config_' + opt, None):
            pytest.skip('.config feature "%s" not enabled' % opt)
    # Excluded features: skip when the .config option is present/enabled.
    for marker in item.iter_markers('notbuildconfigspec'):
        opt = marker.args[0].lower()
        if ubconfig.buildconfig.get('config_' + opt, None):
            pytest.skip('.config feature "%s" enabled' % opt)
Stephen Warren2079db32017-09-18 11:11:49 -0600495def tool_is_in_path(tool):
496 for path in os.environ["PATH"].split(os.pathsep):
497 fn = os.path.join(path, tool)
498 if os.path.isfile(fn) and os.access(fn, os.X_OK):
499 return True
500 return False
501
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    for marker in item.iter_markers('requiredtool'):
        tool = marker.args[0]
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)
Stephen Warren3e3d1432016-10-17 17:25:52 -0600521def start_test_section(item):
522 anchors[item.name] = log.start_section(item.name)
523
Stephen Warren10e50632016-01-15 11:15:24 -0700524def pytest_runtest_setup(item):
Stephen Warren75e731e2016-01-26 13:41:30 -0700525 """pytest hook: Configure (set up) a test item.
Stephen Warren10e50632016-01-15 11:15:24 -0700526
527 Called once for each test to perform any custom configuration. This hook
528 is used to skip the test if certain conditions apply.
529
530 Args:
531 item: The pytest test item.
532
533 Returns:
534 Nothing.
Stephen Warren75e731e2016-01-26 13:41:30 -0700535 """
Stephen Warren10e50632016-01-15 11:15:24 -0700536
Stephen Warren3e3d1432016-10-17 17:25:52 -0600537 start_test_section(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700538 setup_boardspec(item)
539 setup_buildconfigspec(item)
Stephen Warren2079db32017-09-18 11:11:49 -0600540 setup_requiredtool(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700541
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        True, to tell pytest this hook performed the test protocol itself.
    """

    # Discard any warning state left over from a previous test.
    log.get_and_reset_warning()
    ihook = item.ihook
    ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
    reports = runtestprotocol(item, nextitem=nextitem)
    ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    # Assume a clean pass (or pass-with-warning), then inspect the phase
    # reports to downgrade to xpass/fail/xfail/skip as appropriate.
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            # 'failed' + wasxfail means the test unexpectedly passed.
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # 'skipped' + wasxfail means the test failed as expected.
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    # Flush any pending console output into the log before recording status.
    if failure_cleanup:
        console.drain_console()

    # Move the test from the not-run bucket into its final result bucket.
    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    # Restart the U-Boot process after a failure so later tests start clean.
    if failure_cleanup:
        console.cleanup_spawn()

    return True