blob: 0f6d1aa902d420c93ff9a7f89bbbc96dc4318641 [file] [log] [blame]
Simon Glass132be852018-07-06 10:27:23 -06001# SPDX-License-Identifier: GPL-2.0+
2#
3# Copyright (c) 2016 Google, Inc
4#
5
Simon Glass3609d5e2018-07-06 10:27:34 -06006from contextlib import contextmanager
Simon Glassc27d22d2022-01-22 05:07:28 -07007import doctest
Simon Glass132be852018-07-06 10:27:23 -06008import glob
Simon Glass73306922020-04-17 18:09:01 -06009import multiprocessing
Simon Glass132be852018-07-06 10:27:23 -060010import os
11import sys
Simon Glass73306922020-04-17 18:09:01 -060012import unittest
Simon Glass132be852018-07-06 10:27:23 -060013
Simon Glassa997ea52020-04-17 18:09:04 -060014from patman import command
Simon Glass132be852018-07-06 10:27:23 -060015
Simon Glass945328b2020-04-17 18:08:55 -060016from io import StringIO
Simon Glass3609d5e2018-07-06 10:27:34 -060017
# Whether run_test_suites() should buffer test stdout/stderr (disabled when a
# single named test is run, so its output appears live)
buffer_outputs = True

# True if the optional 'concurrencytest' library is available, enabling
# parallel test execution
use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
except ImportError:
    # Library not installed: fall back to serial test runs. Catch only
    # ImportError; a bare 'except' would also swallow KeyboardInterrupt and
    # SystemExit raised during import
    use_concurrent = False
25
Simon Glass3609d5e2018-07-06 10:27:34 -060026
def run_test_coverage(prog, filter_fname, exclude_list, build_dir, required=None,
                      extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be omitted from the coverage calculation. If this is not None,
            then it is used to filter the list so that only filenames that
            contain filter_fname are omitted.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of modules which must be in the coverage report
            (not modified by this function)
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.output('python3-coverage', 'report')
    lines = stdout.splitlines()
    # Set 'ok' before the 'required' check so that a failure detected there
    # is not clobbered afterwards (previously 'ok = True' came later and
    # silently discarded the missing-tests failure)
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set(os.path.splitext(os.path.basename(line.split()[0]))[0]
                       for line in lines if '/etype/' in line)
        # Work on a copy so we don't mutate the caller's set
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    # The last line of the report ends with the total coverage percentage
    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("To get a report in 'htmlcov/index.html', type: python3-coverage html")
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
Simon Glass3609d5e2018-07-06 10:27:34 -060089
90
# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout and sys.stderr into StringIO buffers

    Yields:
        tuple: (stdout buffer, stderr buffer), both io.StringIO; read the
            captured text with .getvalue() after the 'with' block
    """
    saved_out = sys.stdout
    saved_err = sys.stderr
    out_buf = StringIO()
    err_buf = StringIO()
    try:
        sys.stdout = out_buf
        sys.stderr = err_buf
        yield out_buf, err_buf
    finally:
        # Always restore the real streams, even if the body raised
        sys.stdout = saved_out
        sys.stderr = saved_err
Simon Glass73306922020-04-17 18:09:01 -0600103
104
class FullTextTestResult(unittest.TextTestResult):
    """A test result class that can print extended text results to a stream

    This is meant to be used by a TestRunner as a result class. Like
    TextTestResult, this prints out the names of tests as they are run,
    errors as they occur, and a summary of the results at the end of the
    test run. Beyond those, this prints information about skipped tests,
    expected failures and unexpected successes.

    Args:
        stream: A file-like object to write results to
        descriptions (bool): True to print descriptions with test names
        verbosity (int): Detail of printed output per test as they run
            Test stdout and stderr always get printed when buffering
            them is disabled by the test runner. In addition to that,
            0: Print nothing
            1: Print a dot per test
            2: Print test names
            3: Print test names, and buffered outputs for failing tests
    """
    def __init__(self, stream, descriptions, verbosity):
        self.verbosity = verbosity
        super().__init__(stream, descriptions, verbosity)

    def _clamp_mirror(self):
        """Only mirror buffered stdout/stderr when verbosity is 3 or more"""
        self._mirrorOutput &= self.verbosity >= 3

    def printErrors(self):
        "Called by TestRunner after test run to summarize the tests"
        # The parent class doesn't keep unexpected successes in the same
        # format as the rest. Adapt it to what printErrorList expects.
        xpass_list = [
            (test, 'Test was expected to fail, but succeeded.\n')
            for test in self.unexpectedSuccesses
        ]

        super().printErrors()  # FAIL and ERROR
        self.printErrorList('SKIP', self.skipped)
        self.printErrorList('XFAIL', self.expectedFailures)
        self.printErrorList('XPASS', xpass_list)

    def addError(self, test, err):
        """Called when an error has occurred."""
        super().addError(test, err)
        self._clamp_mirror()

    def addFailure(self, test, err):
        """Called when a test has failed."""
        super().addFailure(test, err)
        self._clamp_mirror()

    def addSubTest(self, test, subtest, err):
        """Called at the end of a subtest."""
        super().addSubTest(test, subtest, err)
        self._clamp_mirror()

    def addSuccess(self, test):
        """Called when a test has completed successfully"""
        super().addSuccess(test)
        # Don't print stdout/stderr for successful tests
        self._mirrorOutput = False

    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        # Add empty line to keep spacing consistent with other results
        if not reason.endswith('\n'):
            reason += '\n'
        super().addSkip(test, reason)
        self._clamp_mirror()

    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occurred."""
        super().addExpectedFailure(test, err)
        self._clamp_mirror()
Alper Nebi Yasakfedac7b2022-04-02 20:06:07 +0300177
def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                    test_name, toolpath, class_and_module_list):
    """Run a series of test suites and collect the results

    Args:
        toolname: Name of the tool that ran the tests
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        class_and_module_list: List of test classes (type class) and module
            names (type str) to run

    Returns:
        The result object returned by the unittest runner
    """
    # Rebuild sys.argv so the tool under test sees only the options below
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(
        stream=sys.stdout,
        verbosity=(1 if verbosity is None else verbosity),
        # Output buffering is disabled when running a single test so its
        # output appears live; otherwise module-level buffer_outputs applies
        buffer=False if test_name else buffer_outputs,
        resultclass=FullTextTestResult,
    )

    # Run tests in parallel when concurrencytest is available, unless exactly
    # one process was requested
    if use_concurrent and processes != 1:
        suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count(),
                    buffer=False if test_name else buffer_outputs))

    # String entries name modules whose doctests should run; a bare module
    # name given as test_name selects just that module's doctests
    for module in class_and_module_list:
        if isinstance(module, str) and (not test_name or test_name == module):
            suite.addTests(doctest.DocTestSuite(module))

    for module in class_and_module_list:
        if isinstance(module, str):
            continue
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            # Since Python v3.5 If an ImportError or AttributeError occurs
            # while traversing a name then a synthetic test that raises that
            # error when run will be returned. Check that the requested test
            # exists, otherwise these errors are included in the results.
            if test_name in loader.getTestCaseNames(module):
                suite.addTests(loader.loadTestsFromName(test_name, module))
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))

    print(f" Running {toolname} tests ".center(70, "="))
    result = runner.run(suite)
    print()

    return result