blob: 20dc1e4924478e222f58d22cf7ad29fa85883692 [file] [log] [blame]
Simon Glass132be852018-07-06 10:27:23 -06001# SPDX-License-Identifier: GPL-2.0+
2#
3# Copyright (c) 2016 Google, Inc
4#
5
Simon Glass3609d5e2018-07-06 10:27:34 -06006from contextlib import contextmanager
Simon Glass132be852018-07-06 10:27:23 -06007import glob
Simon Glass73306922020-04-17 18:09:01 -06008import multiprocessing
Simon Glass132be852018-07-06 10:27:23 -06009import os
10import sys
Simon Glass73306922020-04-17 18:09:01 -060011import unittest
Simon Glass132be852018-07-06 10:27:23 -060012
Simon Glassa997ea52020-04-17 18:09:04 -060013from patman import command
Simon Glass132be852018-07-06 10:27:23 -060014
Simon Glass945328b2020-04-17 18:08:55 -060015from io import StringIO
Simon Glass3609d5e2018-07-06 10:27:34 -060016
Simon Glass73306922020-04-17 18:09:01 -060017use_concurrent = True
18try:
19 from concurrencytest import ConcurrentTestSuite, fork_for_tests
20except:
21 use_concurrent = False
22
Simon Glass3609d5e2018-07-06 10:27:34 -060023
Simon Glassb70caf72020-07-09 18:39:29 -060024def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
25 extra_args=None):
Simon Glass132be852018-07-06 10:27:23 -060026 """Run tests and check that we get 100% coverage
27
28 Args:
29 prog: Program to run (with be passed a '-t' argument to run tests
30 filter_fname: Normally all *.py files in the program's directory will
31 be included. If this is not None, then it is used to filter the
32 list so that only filenames that don't contain filter_fname are
33 included.
34 exclude_list: List of file patterns to exclude from the coverage
35 calculation
36 build_dir: Build directory, used to locate libfdt.py
37 required: List of modules which must be in the coverage report
Simon Glassb70caf72020-07-09 18:39:29 -060038 extra_args (str): Extra arguments to pass to the tool before the -t/test
39 arg
Simon Glass132be852018-07-06 10:27:23 -060040
41 Raises:
42 ValueError if the code coverage is not 100%
43 """
44 # This uses the build output from sandbox_spl to get _libfdt.so
45 path = os.path.dirname(prog)
46 if filter_fname:
47 glob_list = glob.glob(os.path.join(path, '*.py'))
48 glob_list = [fname for fname in glob_list if filter_fname in fname]
49 else:
50 glob_list = []
51 glob_list += exclude_list
Simon Glass102b21c2019-05-17 22:00:54 -060052 glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
Simon Glass9c501e12020-07-05 21:41:55 -060053 test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
Simon Glassf156e3b2020-04-17 18:09:00 -060054 prefix = ''
55 if build_dir:
56 prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
57 cmd = ('%spython3-coverage run '
Simon Glassb70caf72020-07-09 18:39:29 -060058 '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
59 prog, extra_args or '', test_cmd))
Simon Glass132be852018-07-06 10:27:23 -060060 os.system(cmd)
Simon Glassf156e3b2020-04-17 18:09:00 -060061 stdout = command.Output('python3-coverage', 'report')
Simon Glass132be852018-07-06 10:27:23 -060062 lines = stdout.splitlines()
63 if required:
64 # Convert '/path/to/name.py' just the module name 'name'
65 test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
66 for line in lines if '/etype/' in line])
67 missing_list = required
Simon Glass0baeab72019-07-08 14:25:32 -060068 missing_list.discard('__init__')
Simon Glass132be852018-07-06 10:27:23 -060069 missing_list.difference_update(test_set)
70 if missing_list:
Simon Glass23b8a192019-05-14 15:53:36 -060071 print('Missing tests for %s' % (', '.join(missing_list)))
72 print(stdout)
Simon Glass132be852018-07-06 10:27:23 -060073 ok = False
74
75 coverage = lines[-1].split(' ')[-1]
76 ok = True
Simon Glass23b8a192019-05-14 15:53:36 -060077 print(coverage)
Simon Glass132be852018-07-06 10:27:23 -060078 if coverage != '100%':
Simon Glass23b8a192019-05-14 15:53:36 -060079 print(stdout)
Simon Glassf156e3b2020-04-17 18:09:00 -060080 print("Type 'python3-coverage html' to get a report in "
81 'htmlcov/index.html')
Simon Glass23b8a192019-05-14 15:53:36 -060082 print('Coverage error: %s, but should be 100%%' % coverage)
Simon Glass132be852018-07-06 10:27:23 -060083 ok = False
84 if not ok:
85 raise ValueError('Test coverage failure')
Simon Glass3609d5e2018-07-06 10:27:34 -060086
87
@contextmanager
def capture_sys_output():
    """Temporarily capture stdout/stderr into StringIO buffers

    Use this to suppress (and optionally inspect) console output:
        with capture_sys_output() as (stdout, stderr):
            ...do something...

    Yields:
        tuple: (stdout buffer, stderr buffer), both io.StringIO
    """
    saved_streams = sys.stdout, sys.stderr
    out_buf = StringIO()
    err_buf = StringIO()
    try:
        sys.stdout = out_buf
        sys.stderr = err_buf
        yield out_buf, err_buf
    finally:
        # Always restore the real streams, even if the body raises
        sys.stdout, sys.stderr = saved_streams
Simon Glass73306922020-04-17 18:09:01 -0600100
101
def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results

    Returns:
        int: 0 if all tests passed, 1 if there were any errors or failures
    """
    # Remove errors which just indicate a missing test. Since Python v3.5 if
    # an ImportError or AttributeError occurs while traversing name then a
    # synthetic test that raises that error when run will be returned. These
    # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) in err:
                # Synthetic 'missing test' error: drop it and do not count it
                # as a test that ran (previously every error, including real
                # ones, decremented testsRun)
                result.testsRun -= 1
            else:
                errors.append((test, err))
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        # Print this failure's test id and traceback (previously this printed
        # the entire failures list once per failure)
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
137
138
def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    # Rebuild sys.argv with the options the tools under test understand, so
    # they can pick them up when they parse arguments
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                # This module does not have the named test; skip it
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)