# Copyright (c) 2015 Stephen Warren
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
#
# SPDX-License-Identifier: GPL-2.0

# Implementation of pytest run-time hook functions. These are invoked by
# pytest at certain points during operation, e.g. at startup, for each
# executed test, at shutdown, etc. These hooks perform functions such as:
# - Parsing custom command-line options.
# - Pulling in user-specified board configuration.
# - Creating the U-Boot console test fixture.
# - Creating the HTML log file.
# - Monitoring each test's results.
# - Implementing custom pytest markers.

import atexit
import errno
import os
import os.path
import pytest
from _pytest.runner import runtestprotocol
import ConfigParser
import re
import StringIO
import sys

# Globals: The HTML log file, and the connection to the U-Boot console.
log = None
console = None

def mkdir_p(path):
    """Create a directory path.

    This includes creating any intermediate/parent directories. Any errors
    caused by already-existing directories are ignored.

    Args:
        path: The directory path to create.

    Returns:
        Nothing.
    """

    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise

def pytest_addoption(parser):
    """pytest hook: Add custom command-line options to the cmdline parser.

    Args:
        parser: The pytest command-line parser.

    Returns:
        Nothing.
    """

    parser.addoption('--build-dir', default=None,
        help='U-Boot build directory (O=)')
    parser.addoption('--result-dir', default=None,
        help='U-Boot test result/tmp directory')
    parser.addoption('--persistent-data-dir', default=None,
        help='U-Boot test persistent generated data directory')
    parser.addoption('--board-type', '--bd', '-B', default='sandbox',
        help='U-Boot board type')
    parser.addoption('--board-identity', '--id', default='na',
        help='U-Boot board identity/instance')
    parser.addoption('--build', default=False, action='store_true',
        help='Compile U-Boot before running tests')
    parser.addoption('--gdbserver', default=None,
        help='Run sandbox under gdbserver. The argument is the channel ' +
        'over which gdbserver should communicate, e.g. localhost:1234')
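
    # As an illustrative sketch (the paths and board names here are
    # hypothetical, not required values), these options might be combined on
    # the command line as:
    #   test/py/test.py --bd sandbox --build --result-dir /tmp/test-results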

def pytest_configure(config):
    """pytest hook: Perform custom initialization at startup time.

    Args:
        config: The pytest configuration.

    Returns:
        Nothing.
    """

    global log
    global console
    global ubconfig

    test_py_dir = os.path.dirname(os.path.abspath(__file__))
    source_dir = os.path.dirname(os.path.dirname(test_py_dir))

    board_type = config.getoption('board_type')
    board_type_filename = board_type.replace('-', '_')

    board_identity = config.getoption('board_identity')
    board_identity_filename = board_identity.replace('-', '_')

    build_dir = config.getoption('build_dir')
    if not build_dir:
        build_dir = source_dir + '/build-' + board_type
    mkdir_p(build_dir)

    result_dir = config.getoption('result_dir')
    if not result_dir:
        result_dir = build_dir
    mkdir_p(result_dir)

    persistent_data_dir = config.getoption('persistent_data_dir')
    if not persistent_data_dir:
        persistent_data_dir = build_dir + '/persistent-data'
    mkdir_p(persistent_data_dir)

    gdbserver = config.getoption('gdbserver')
    if gdbserver and board_type != 'sandbox':
        raise Exception('--gdbserver only supported with sandbox')

    import multiplexed_log
    log = multiplexed_log.Logfile(result_dir + '/test-log.html')

    if config.getoption('build'):
        if build_dir != source_dir:
            o_opt = 'O=%s' % build_dir
        else:
            o_opt = ''
        cmds = (
            ['make', o_opt, '-s', board_type + '_defconfig'],
            ['make', o_opt, '-s', '-j8'],
        )
        with log.section('make'):
            runner = log.get_runner('make', sys.stdout)
            for cmd in cmds:
                runner.run(cmd, cwd=source_dir)
            runner.close()
            log.status_pass('OK')

    class ArbitraryAttributeContainer(object):
        pass

    ubconfig = ArbitraryAttributeContainer()
    ubconfig.brd = dict()
    ubconfig.env = dict()

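    # Example (assuming the default board type 'sandbox' and identity 'na'):
    # the loop below would attempt to import u_boot_board_sandbox.py,
    # u_boot_boardenv_sandbox.py and u_boot_boardenv_sandbox_na.py, silently
    # skipping any that don't exist.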
    modules = [
        (ubconfig.brd, 'u_boot_board_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename),
        (ubconfig.env, 'u_boot_boardenv_' + board_type_filename + '_' +
            board_identity_filename),
    ]
    for (dict_to_fill, module_name) in modules:
        try:
            module = __import__(module_name)
        except ImportError:
            continue
        dict_to_fill.update(module.__dict__)

    ubconfig.buildconfig = dict()

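    # Each build configuration file is a flat list of KEY=value assignments,
    # e.g. CONFIG_CMD_MEMORY=y. Prepending a dummy '[root]' section header
    # below lets ConfigParser digest it; RawConfigParser lower-cases option
    # names, so tests look up keys such as 'config_cmd_memory' in
    # ubconfig.buildconfig.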
    for conf_file in ('.config', 'include/autoconf.mk'):
        dot_config = build_dir + '/' + conf_file
        if not os.path.exists(dot_config):
            raise Exception(conf_file + ' does not exist; ' +
                'try passing --build option?')

        with open(dot_config, 'rt') as f:
            ini_str = '[root]\n' + f.read()
            ini_sio = StringIO.StringIO(ini_str)
            parser = ConfigParser.RawConfigParser()
            parser.readfp(ini_sio)
            ubconfig.buildconfig.update(parser.items('root'))

    ubconfig.test_py_dir = test_py_dir
    ubconfig.source_dir = source_dir
    ubconfig.build_dir = build_dir
    ubconfig.result_dir = result_dir
    ubconfig.persistent_data_dir = persistent_data_dir
    ubconfig.board_type = board_type
    ubconfig.board_identity = board_identity
    ubconfig.gdbserver = gdbserver
    ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'

    env_vars = (
        'board_type',
        'board_identity',
        'source_dir',
        'test_py_dir',
        'build_dir',
        'result_dir',
        'persistent_data_dir',
    )
    for v in env_vars:
        os.environ['U_BOOT_' + v.upper()] = getattr(ubconfig, v)

    if board_type.startswith('sandbox'):
        import u_boot_console_sandbox
        console = u_boot_console_sandbox.ConsoleSandbox(log, ubconfig)
    else:
        import u_boot_console_exec_attach
        console = u_boot_console_exec_attach.ConsoleExecAttach(log, ubconfig)

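# The regex below matches the linker-generated symbols that enumerate the
# unit tests built into a U-Boot binary. For example, a (hypothetical) symbol
# table line ending in
#   _u_boot_list_2_dm_test_2_dm_test_autobind
# would be parsed as suite 'dm', test 'autobind', yielding the parametrized
# value 'dm autobind' and test ID 'ut_dm_autobind'.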
re_ut_test_list = re.compile(r'_u_boot_list_2_(.*)_test_2_\1_test_(.*)\s*$')
def generate_ut_subtest(metafunc, fixture_name):
    """Provide parametrization for a ut_subtest fixture.

    Determines the set of unit tests built into a U-Boot binary by parsing the
    list of symbols generated by the build process. Provides this information
    to test functions by parameterizing their ut_subtest fixture parameter.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

    fn = console.config.build_dir + '/u-boot.sym'
    try:
        with open(fn, 'rt') as f:
            lines = f.readlines()
    except IOError:
        # No symbol file; assume this build contains no unit tests.
        lines = []
    lines.sort()

    vals = []
    for l in lines:
        m = re_ut_test_list.search(l)
        if not m:
            continue
        vals.append(m.group(1) + ' ' + m.group(2))

    ids = ['ut_' + s.replace(' ', '_') for s in vals]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def generate_config(metafunc, fixture_name):
    """Provide parametrization for {env,brd}__ fixtures.

    If a test function takes parameter(s) (fixture names) of the form brd__xxx
    or env__xxx, the brd and env configuration dictionaries are consulted to
    find the list of values to use for those parameters, and the test is
    parametrized so that it runs once for each combination of values.

    Args:
        metafunc: The pytest test function.
        fixture_name: The fixture name to test.

    Returns:
        Nothing.
    """

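    # An illustrative sketch (the names here are hypothetical): for a test
    # that takes a fixture named env__usb_dev_port, a board env file may
    # either define a single value,
    #   env__usb_dev_port = {'fixture_id': 'only_port'}
    # or a pluralized list of values,
    #   env__usb_dev_ports = [{'fixture_id': 'port0'}, {'fixture_id': 'port1'}]
    # in which case the test is run once per list entry.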
    subconfigs = {
        'brd': console.config.brd,
        'env': console.config.env,
    }
    parts = fixture_name.split('__')
    if len(parts) < 2:
        return
    if parts[0] not in subconfigs:
        return
    subconfig = subconfigs[parts[0]]
    vals = []
    val = subconfig.get(fixture_name, [])
    # If that exact name is a key in the data source:
    if val:
        # ... use the dict value as a single parameter value.
        vals = (val, )
    else:
        # ... otherwise, see if there's a key that contains a list of
        # values to use instead.
        vals = subconfig.get(fixture_name + 's', [])
    def fixture_id(index, val):
        try:
            return val['fixture_id']
        except (TypeError, KeyError):
            # The value carries no explicit ID; synthesize one from its index.
            return fixture_name + str(index)
    ids = [fixture_id(index, val) for (index, val) in enumerate(vals)]
    metafunc.parametrize(fixture_name, vals, ids=ids)

def pytest_generate_tests(metafunc):
    """pytest hook: parameterize test functions based on custom rules.

    Check each test function parameter (fixture name) to see if it is one of
    our custom names, and if so, provide the correct parametrization for that
    parameter.

    Args:
        metafunc: The pytest test function.

    Returns:
        Nothing.
    """

    for fn in metafunc.fixturenames:
        if fn == 'ut_subtest':
            generate_ut_subtest(metafunc, fn)
            continue
        generate_config(metafunc, fn)

@pytest.fixture(scope='session')
def u_boot_log(request):
    """Generate the value of a test's u_boot_log fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.log

@pytest.fixture(scope='session')
def u_boot_config(request):
    """Generate the value of a test's u_boot_config fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    return console.config

@pytest.fixture(scope='function')
def u_boot_console(request):
    """Generate the value of a test's u_boot_console fixture.

    Args:
        request: The pytest request.

    Returns:
        The fixture value.
    """

    console.ensure_spawned()
    return console

anchors = {}
tests_not_run = []
tests_failed = []
tests_xpassed = []
tests_xfailed = []
tests_skipped = []
tests_passed = []

def pytest_itemcollected(item):
    """pytest hook: Called once for each test found during collection.

    This enables our custom result analysis code to see the list of all tests
    that should eventually be run.

    Args:
        item: The item that was collected.

    Returns:
        Nothing.
    """

    tests_not_run.append(item.name)

def cleanup():
    """Clean up all global state.

    Executed (via atexit) once the entire test process is complete. This
    includes logging the status of all tests, and the identity of any failed
    or skipped tests.

    Args:
        None.

    Returns:
        Nothing.
    """

    if console:
        console.close()
    if log:
        with log.section('Status Report', 'status_report'):
            log.status_pass('%d passed' % len(tests_passed))
            if tests_skipped:
                log.status_skipped('%d skipped' % len(tests_skipped))
                for test in tests_skipped:
                    anchor = anchors.get(test, None)
                    log.status_skipped('... ' + test, anchor)
            if tests_xpassed:
                log.status_xpass('%d xpass' % len(tests_xpassed))
                for test in tests_xpassed:
                    anchor = anchors.get(test, None)
                    log.status_xpass('... ' + test, anchor)
            if tests_xfailed:
                log.status_xfail('%d xfail' % len(tests_xfailed))
                for test in tests_xfailed:
                    anchor = anchors.get(test, None)
                    log.status_xfail('... ' + test, anchor)
            if tests_failed:
                log.status_fail('%d failed' % len(tests_failed))
                for test in tests_failed:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
            if tests_not_run:
                log.status_fail('%d not run' % len(tests_not_run))
                for test in tests_not_run:
                    anchor = anchors.get(test, None)
                    log.status_fail('... ' + test, anchor)
        log.close()
atexit.register(cleanup)

def setup_boardspec(item):
    """Process any 'boardspec' marker for a test.

    Such a marker lists the set of board types that a test does/doesn't
    support. If tests are being executed on an unsupported board, the test is
    marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

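    # Example usage (board names are illustrative): a test marked with
    #   @pytest.mark.boardspec('sandbox')
    # runs only on the sandbox board, whereas
    #   @pytest.mark.boardspec('!seaboard')
    # runs on any board except seaboard.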
    mark = item.get_marker('boardspec')
    if not mark:
        return
    required_boards = []
    for board in mark.args:
        if board.startswith('!'):
            if ubconfig.board_type == board[1:]:
                pytest.skip('board "%s" not supported' % ubconfig.board_type)
                return
        else:
            required_boards.append(board)
    if required_boards and ubconfig.board_type not in required_boards:
        pytest.skip('board "%s" not supported' % ubconfig.board_type)

def setup_buildconfigspec(item):
    """Process any 'buildconfigspec' marker for a test.

    Such a marker lists some U-Boot configuration feature that the test
    requires. If tests are being executed on a U-Boot build that doesn't
    have the required feature, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

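    # Example usage: a test marked with
    #   @pytest.mark.buildconfigspec('cmd_memory')
    # is skipped unless the U-Boot build under test enables
    # CONFIG_CMD_MEMORY.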
    mark = item.get_marker('buildconfigspec')
    if not mark:
        return
    for option in mark.args:
        if not ubconfig.buildconfig.get('config_' + option.lower(), None):
            pytest.skip('.config feature "%s" not enabled' % option.lower())

def tool_is_in_path(tool):
    """Determine whether an executable tool can be found in $PATH.

    Args:
        tool: The name of the tool to search for.

    Returns:
        True if the tool is present and executable, False otherwise.
    """

    for path in os.environ["PATH"].split(os.pathsep):
        fn = os.path.join(path, tool)
        if os.path.isfile(fn) and os.access(fn, os.X_OK):
            return True
    return False

def setup_requiredtool(item):
    """Process any 'requiredtool' marker for a test.

    Such a marker lists some external tool (binary, executable, application)
    that the test requires. If tests are being executed on a system that
    doesn't have the required tool, the test is marked to be skipped.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

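    # Example usage: a test marked with
    #   @pytest.mark.requiredtool('dtc')
    # is skipped if no executable named dtc can be found in $PATH.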
    mark = item.get_marker('requiredtool')
    if not mark:
        return
    for tool in mark.args:
        if not tool_is_in_path(tool):
            pytest.skip('tool "%s" not in $PATH' % tool)

def start_test_section(item):
    """Begin a new section in the log file for the given test item."""

    anchors[item.name] = log.start_section(item.name)

def pytest_runtest_setup(item):
    """pytest hook: Configure (set up) a test item.

    Called once for each test to perform any custom configuration. This hook
    is used to skip the test if certain conditions apply.

    Args:
        item: The pytest test item.

    Returns:
        Nothing.
    """

    start_test_section(item)
    setup_boardspec(item)
    setup_buildconfigspec(item)
    setup_requiredtool(item)

def pytest_runtest_protocol(item, nextitem):
    """pytest hook: Called to execute a test.

    This hook wraps the standard pytest runtestprotocol() function in order
    to acquire visibility into, and record, each test function's result.

    Args:
        item: The pytest test item to execute.
        nextitem: The pytest test item that will be executed after this one.

    Returns:
        A list of pytest reports (test result data).
    """

    reports = runtestprotocol(item, nextitem=nextitem)

    # In pytest 3, runtestprotocol() may not call pytest_runtest_setup() if
    # the test is skipped. That call is required to create the test's section
    # in the log file. The call to log.end_section() requires that the log
    # contain a section for this test. Create a section for the test if it
    # doesn't already exist.
    if item.name not in anchors:
        start_test_section(item)

    failure_cleanup = False
    test_list = tests_passed
    msg = 'OK'
    msg_log = log.status_pass
    for report in reports:
        if report.outcome == 'failed':
            if hasattr(report, 'wasxfail'):
                test_list = tests_xpassed
                msg = 'XPASSED'
                msg_log = log.status_xpass
            else:
                failure_cleanup = True
                test_list = tests_failed
                msg = 'FAILED:\n' + str(report.longrepr)
                msg_log = log.status_fail
            break
        if report.outcome == 'skipped':
            if hasattr(report, 'wasxfail'):
                failure_cleanup = True
                test_list = tests_xfailed
                msg = 'XFAILED:\n' + str(report.longrepr)
                msg_log = log.status_xfail
                break
            test_list = tests_skipped
            msg = 'SKIPPED:\n' + str(report.longrepr)
            msg_log = log.status_skipped

    if failure_cleanup:
        console.drain_console()

    test_list.append(item.name)
    tests_not_run.remove(item.name)

    try:
        msg_log(msg)
    except:
        # If something went wrong with logging, it's better to let the test
        # process continue, which may report other exceptions that triggered
        # the logging issue (e.g. console.log wasn't created). Hence, just
        # squash the exception. If the test setup failed due to e.g. a syntax
        # error somewhere else, this won't be seen. However, once that issue
        # is fixed, if this exception still exists, it will then be logged as
        # part of the test's stdout.
        import traceback
        print 'Exception occurred while logging runtest status:'
        traceback.print_exc()
        # FIXME: Can we force a test failure here?

    log.end_section(item.name)

    if failure_cleanup:
        console.cleanup_spawn()

    return reports