blob: e40cbf0ba12ab329afdc08413d71719cee992b8b [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001# SPDX-License-Identifier: GPL-2.0
Stephen Warren10e50632016-01-15 11:15:24 -07002# Copyright (c) 2015 Stephen Warren
3# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
Stephen Warren10e50632016-01-15 11:15:24 -07004
5# Implementation of pytest run-time hook functions. These are invoked by
6# pytest at certain points during operation, e.g. startup, for each executed
7# test, at shutdown etc. These hooks perform functions such as:
8# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
10# - Creating the U-Boot console test fixture.
11# - Creating the HTML log file.
12# - Monitoring each test's results.
13# - Implementing custom pytest markers.
14
15import atexit
16import errno
17import os
18import os.path
Stephen Warren10e50632016-01-15 11:15:24 -070019import pytest
20from _pytest.runner import runtestprotocol
Stephen Warren770fe172016-02-08 14:44:16 -070021import re
Stephen Warren10e50632016-01-15 11:15:24 -070022import StringIO
23import sys
24
Paul Burton5ddc2872017-09-14 14:34:45 -070025try:
26 import configparser
27except:
28 import ConfigParser as configparser
29
# Globals: The HTML log file, and the connection to the U-Boot console.
# Both are None until pytest_configure() creates them.
log = None
console = None
33
def mkdir_p(path):
    """Create a directory path, like `mkdir -p`.

    Any intermediate/parent directories are created as needed. An error
    caused by the directory already existing is silently ignored; all other
    errors are propagated.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        # Swallow the error only when the path is an already-existing
        # directory; re-raise anything else (permission errors, or an
        # existing non-directory at the same path).
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
54
def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    # (option strings, keyword arguments) for each custom option, registered
    # in the order they should appear in --help output.
    option_specs = (
        (('--build-dir',),
         dict(default=None, help='U-Boot build directory (O=)')),
        (('--result-dir',),
         dict(default=None, help='U-Boot test result/tmp directory')),
        (('--persistent-data-dir',),
         dict(default=None,
              help='U-Boot test persistent generated data directory')),
        (('--board-type', '--bd', '-B'),
         dict(default='sandbox', help='U-Boot board type')),
        (('--board-identity', '--id'),
         dict(default='na', help='U-Boot board identity/instance')),
        (('--build',),
         dict(default=False, action='store_true',
              help='Compile U-Boot before running tests')),
        (('--gdbserver',),
         dict(default=None,
              help='Run sandbox under gdbserver. The argument is the channel '
                   'over which gdbserver should communicate, e.g. '
                   'localhost:1234')),
    )
    for (opt_strings, kwargs) in option_specs:
        parser.addoption(*opt_strings, **kwargs)
Stephen Warren10e50632016-01-15 11:15:24 -070080
def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Sets up the module-level globals (log, console, ubconfig): determines
    all working directories, optionally builds U-Boot, loads per-board
    configuration modules, parses the U-Boot build configuration, and
    creates the console connection object.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    # This file lives in <source>/test/py/, so the U-Boot source tree is
    # two levels up from here.
    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    # '-' is not valid in Python module names, so board config modules use
    # '_' instead (see the u_boot_board*/u_boot_boardenv* imports below).
    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    # Default each directory option when not given on the command line, and
    # make sure all of them exist.
    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    # gdbserver can only wrap a locally-executed sandbox binary, not real
    # hardware.
    gdbserver = config.getoption('gdbserver')
    if gdbserver and not board_type.startswith('sandbox'):
        raise Exception('--gdbserver only supported with sandbox targets')

    # Deferred import: multiplexed_log lives next to this file and is found
    # via pytest's sys.path handling.
    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        # Only pass O= when building out of tree.
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    # ubconfig is a plain attribute bag exposing all test configuration.
    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

    # Optionally merge board/environment configuration from user-supplied
    # Python modules; a missing module simply contributes nothing.
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        # Kconfig output is INI-like but has no section header; fake a
        # [root] section so RawConfigParser accepts it.
        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = configparser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    # Export the configuration to child processes (e.g. hook scripts) via
    # U_BOOT_* environment variables.
    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    # Sandbox runs as a local process; anything else is driven through
    # external flash/connect hook scripts.
    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)
205
# Matches linker-list symbols that the build emits for each built-in unit
# test, e.g. "_u_boot_list_2_<suite>_test_2_<suite>_test_<name>". Group 1 is
# the suite name (the \1 backreference requires it to repeat), group 2 the
# individual test name.
re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    # Narrowed from a bare "except:"; only a missing/unreadable symbol file
    # should mean "no unit tests built in". Anything else (e.g. a coding
    # error) must propagate rather than be silently swallowed.
    except (IOError, OSError):
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        # "<suite> <name>", matching what "ut" expects on the command line.
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)
239
def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        # Prefer an explicit per-value ID, falling back to a generated
        # "<fixture_name><index>" one. Narrowed from a bare "except:":
        # KeyError covers a mapping without the key; TypeError covers
        # non-mapping values. Other exceptions now propagate.
        try:
            return val['fixture_id']
        except (KeyError, TypeError):
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)
283
def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fixture_name in metafunc.fixturenames:
        # ut_subtest is parametrized from the U-Boot binary's symbol table;
        # everything else is looked up in the brd/env configuration.
        if fixture_name == 'ut_subtest':
            generate_ut_subtest(metafunc, fixture_name)
        else:
            generate_config(metafunc, fixture_name)
Stephen Warren10e50632016-01-15 11:15:24 -0700303
@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's log fixture.

    Session-scoped: all tests share the single HTML log created by
    pytest_configure().

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log
316
@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Session-scoped: all tests share the single ubconfig object created by
    pytest_configure() (exposed here via the console object).

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config
329
@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Function-scoped so every test re-validates the console before use.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    # NOTE(review): presumably spawns/attaches U-Boot if it is not already
    # running -- confirm against the console class implementation.
    console.ensure_spawned()
    return console
343
# Maps test name -> HTML log anchor for that test's log section (see
# start_test_section() and cleanup()).
anchors = {}
# Outcome buckets: every collected test's name starts in tests_not_run (see
# pytest_itemcollected()) and is moved into exactly one of the other lists
# by pytest_runtest_protocol(); cleanup() reports the final contents.
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_warning = []
tests_passed = []
Stephen Warren10e50632016-01-15 11:15:24 -0700352
def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    # Until it actually executes, account for the test as "not run";
    # pytest_runtest_protocol() moves it to its final outcome bucket.
    tests_not_run.append(item.name)
Stephen Warren10e50632016-01-15 11:15:24 -0700367
def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if not log:
        return
    with log.section('Status Report', 'status_report'):
        log.status_pass('%d passed' % len(tests_passed))
        # Each remaining outcome bucket is reported the same way: a summary
        # count, then one line per test (with its log anchor, if any).
        # (bucket, summary format, log function used to report it)
        report_specs = (
            (tests_warning, '%d passed with warning', log.status_warning),
            (tests_skipped, '%d skipped', log.status_skipped),
            (tests_xpassed, '%d xpass', log.status_xpass),
            (tests_xfailed, '%d xfail', log.status_xfail),
            (tests_failed, '%d failed', log.status_fail),
            (tests_not_run, '%d not run', log.status_fail),
        )
        for (bucket, summary_fmt, log_fn) in report_specs:
            if not bucket:
                continue
            log_fn(summary_fmt % len(bucket))
            for test in bucket:
                anchor = anchors.get(test, None)
                log_fn('... ' + test, anchor)
    log.close()
atexit.register(cleanup)
419
def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('boardspec')
    if not mark:
        return
    current_board = ubconfig.board_type
    required_boards = []
    for board in mark.args:
        # A plain name is a requirement; a leading '!' excludes a board.
        if not board.startswith('!'):
            required_boards.append(board)
            continue
        if current_board == board[1:]:
            pytest.skip('board "%s" not supported' % current_board)
            return
    if required_boards and current_board not in required_boards:
        pytest.skip('board "%s" not supported' % current_board)
Stephen Warren10e50632016-01-15 11:15:24 -0700447
def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on an U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        # Kconfig keys were lower-cased and prefixed with "config_" when
        # parsed into ubconfig.buildconfig by pytest_configure().
        if ubconfig.buildconfig.get('config_' + option.lower(), None):
            continue
        pytest.skip('.config feature "%s" not enabled' % option.lower())
Stephen Warren10e50632016-01-15 11:15:24 -0700468
def tool_is_in_path(tool):
    """Determine whether an executable tool can be found via $PATH.

    Args:
        tool: The name of the executable to look for.

    Returns:
        True if some $PATH entry contains an executable file of that name,
        False otherwise.
    """

    search_dirs = os.environ["PATH"].split(os.pathsep)
    candidates = (os.path.join(d, tool) for d in search_dirs)
    return any(os.path.isfile(c) and os.access(c, os.X_OK)
               for c in candidates)
475
def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    mark = item.get_marker('requiredtool')
    if not mark:
        return
    # Skip on the first tool (in marker order) that cannot be found.
    missing = [tool for tool in mark.args if not tool_is_in_path(tool)]
    if missing:
        pytest.skip('tool "%s" not in $PATH' % missing[0])
496
def start_test_section(item):
    """Open a section in the HTML log for a test and remember its anchor.

    The anchor is stored so the final status report in cleanup() can link
    back to the test's log section.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    anchors[item.name] = log.start_section(item.name)
499
Stephen Warren10e50632016-01-15 11:15:24 -0700500def pytest_runtest_setup(item):
Stephen Warren75e731e2016-01-26 13:41:30 -0700501 """pytest hook: Configure (set up) a test item.
Stephen Warren10e50632016-01-15 11:15:24 -0700502
503 Called once for each test to perform any custom configuration. This hook
504 is used to skip the test if certain conditions apply.
505
506 Args:
507 item: The pytest test item.
508
509 Returns:
510 Nothing.
Stephen Warren75e731e2016-01-26 13:41:30 -0700511 """
Stephen Warren10e50632016-01-15 11:15:24 -0700512
Stephen Warren3e3d1432016-10-17 17:25:52 -0600513 start_test_section(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700514 setup_boardspec(item)
515 setup_buildconfigspec(item)
Stephen Warren2079db32017-09-18 11:11:49 -0600516 setup_requiredtool(item)
Stephen Warren10e50632016-01-15 11:15:24 -0700517
def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    # Discard any stale warning state, so only warnings emitted while this
    # test ran are seen by the second call below.
    log.get_and_reset_warning()
    reports = runtestprotocol(item, nextitem=nextitem)
    was_warning = log.get_and_reset_warning()

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if not item.name in anchors:
        start_test_section(item)

    # Classify the result. Default to "passed" (possibly with warning); the
    # loop below overrides (test_list, msg, msg_log) based on the reports.
    failure_cleanup = False
    if not was_warning:
        test_list = tests_passed
        msg = 'OK'
        msg_log = log.status_pass
    else:
        test_list = tests_warning
        msg = 'OK (with warning)'
        msg_log = log.status_warning
    for report in reports:
        if report.outcome == 'failed':
            # wasxfail means an expected failure actually passed (XPASS).
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            # wasxfail here means an expected failure did fail (XFAIL).
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            # Plain skip: keep scanning remaining reports (no break).
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    # On (x)failure, flush any pending console output into the log before
    # reporting, so the failing test's section is complete.
    if failure_cleanup:
        console.drain_console()

    # Move the test from "not run" into its final outcome bucket.
    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print('Exception occurred while logging runtest status:')
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    # Restart the U-Boot process after a failure so the next test starts
    # from a clean console.
    if failure_cleanup:
        console.cleanup_spawn()

    return reports