# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import doctest
import glob
from io import StringIO
import multiprocessing
import os
import sys
import unittest

from patman import command

use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
except ImportError:
    use_concurrent = False


def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
                    extra_args=None):
Simon Glass132be852018-07-06 10:27:23 -060027 """Run tests and check that we get 100% coverage
28
29 Args:
30 prog: Program to run (with be passed a '-t' argument to run tests
31 filter_fname: Normally all *.py files in the program's directory will
32 be included. If this is not None, then it is used to filter the
33 list so that only filenames that don't contain filter_fname are
34 included.
35 exclude_list: List of file patterns to exclude from the coverage
36 calculation
37 build_dir: Build directory, used to locate libfdt.py
38 required: List of modules which must be in the coverage report
Simon Glass5d5930d2020-07-09 18:39:29 -060039 extra_args (str): Extra arguments to pass to the tool before the -t/test
40 arg
Simon Glass132be852018-07-06 10:27:23 -060041
42 Raises:
43 ValueError if the code coverage is not 100%
44 """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    # Assume success until we find a problem below
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = set(required)
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
Simon Glass132be852018-07-06 10:27:23 -060080 if coverage != '100%':
Simon Glass23b8a192019-05-14 15:53:36 -060081 print(stdout)
Simon Glassf156e3b2020-04-17 18:09:00 -060082 print("Type 'python3-coverage html' to get a report in "
83 'htmlcov/index.html')
Simon Glass23b8a192019-05-14 15:53:36 -060084 print('Coverage error: %s, but should be 100%%' % coverage)
Simon Glass132be852018-07-06 10:27:23 -060085 ok = False
86 if not ok:
87 raise ValueError('Test coverage failure')
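
# Illustrative call of RunTestCoverage(); this is a sketch, not taken from any
# real caller. The tool path, exclude patterns and 'required' set below are
# hypothetical:
#
#   test_util.RunTestCoverage('tools/mytool/mytool', None,
#                             ['*test*', '*main.py'], args.build_dir,
#                             required={'mymodule'})
#
# This runs the tool's tests under python3-coverage and raises ValueError if
# coverage is below 100% or a required module is missing from the report.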


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr):
#   ...do something...
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err


def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
105 """Report the results from a suite of tests
106
107 Args:
108 toolname: Name of the tool that ran the tests
109 test_name: Name of test that was run, or None for all
110 result: A unittest.TestResult object containing the results
111 """
112 # Remove errors which just indicate a missing test. Since Python v3.5 If an
113 # ImportError or AttributeError occurs while traversing name then a
114 # synthetic test that raises that error when run will be returned. These
115 # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []

        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            else:
                # Drop the synthetic test and don't count it as having run
                result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
              's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
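
# Minimal sketch of how ReportResult() might be called; the tool name and the
# 'args' object are hypothetical:
#
#   result = unittest.TestResult()
#   ...run the tests, adding their outcomes to 'result'...
#   return test_util.ReportResult('mytool', args.test_name, result)
#
# The return value (0 on success, 1 on failure) is suitable as an exit code.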


def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    # Run doctests for any modules listed here (none at present)
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
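
# Minimal sketch of how RunTestSuites() might be driven; the test classes and
# the 'args' object below are hypothetical:
#
#   result = unittest.TestResult()
#   test_util.RunTestSuites(
#       result, debug=True, verbosity=1, test_preserve_dirs=False,
#       processes=None, test_name=args.test_name, toolpath=[],
#       test_class_list=[TestFunctional, TestEntry])
#
# The populated 'result' can then be summarised with ReportResult(), as shown
# earlier.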