# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#

from contextlib import contextmanager
import glob
import multiprocessing
import os
import sys
import unittest

from patman import command

from io import StringIO

use_concurrent = True
try:
    from concurrencytest import ConcurrentTestSuite, fork_for_tests
except ImportError:
    use_concurrent = False


def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must be in the coverage report

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    test_cmd = 'test' if 'binman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s -P1' % (prefix, ','.join(glob_list),
                                      prog, test_cmd))
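    # The assembled command ends up looking roughly like this (an illustrative
    # sketch; the paths and omit patterns are examples, not from a real run):
    #   PYTHONPATH=$PYTHONPATH:b/sandbox_spl/tools python3-coverage run \
    #       --omit "*libfdt.py,*site-packages*" tools/binman/binman test -P1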
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = required
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr)
#   ...do something...
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err
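
# Example of using capture_sys_output() in a test (an illustrative sketch;
# the function and message names are made up):
#
#     with capture_sys_output() as (stdout, stderr):
#         run_noisy_operation()
#     self.assertIn('expected message', stdout.getvalue())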


def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results

    Returns:
        0 if the tests passed, 1 if there were any errors or failures
    """
    # Remove errors which just indicate a missing test. Since Python v3.5, if
    # an ImportError or AttributeError occurs while traversing a name, then a
    # synthetic test that raises that error when run will be returned. These
    # errors are included in the errors accumulated by result.errors.
    if test_name:
        errors = []

        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            else:
                # Only discount the synthetic tests that we are removing
                result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' %
              (len(result.skipped), toolname,
               's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
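
# A hedged sketch of how a tool might use the return value as its exit code;
# 'mytool' and the FunctionalTest class name are hypothetical:
#
#     result = unittest.TestResult()
#     RunTestSuites(result, False, 1, False, None, None, [], [FunctionalTest])
#     sys.exit(ReportResult('mytool', None, result))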


def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (via test_name) it also
            preserves the output directory for this test. Both directories are
            displayed on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(suite,
                fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
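
# A minimal sketch (an assumption, not taken from any test module) of the
# setup_test_args() hook that RunTestSuites() probes for on each test module:
# defining a function with this signature lets a module receive the runner's
# settings before its tests are loaded.
#
#     def setup_test_args(preserve_indir=False, preserve_outdirs=False,
#                         toolpath=None, verbosity=None):
#         """Record the runner's settings for later use by the tests"""
#         global preserve_outdirs_for_debug
#         preserve_outdirs_for_debug = preserve_outdirs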