1"""
2Tests of regrtest.py.
3
4Note: test_regrtest cannot be run twice in parallel.
5"""
6
7import contextlib
8import glob
9import io
10import os.path
11import platform
12import re
13import subprocess
14import sys
15import sysconfig
16import tempfile
17import textwrap
import unittest
from test import libregrtest
from test import support
from test.support import os_helper
from test.libregrtest import utils, setup

if not support.has_subprocess_support:
    raise unittest.SkipTest("test module requires subprocess")

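# sys.gettotalrefcount() only exists in CPython debug builds, so its
# presence is a reliable proxy for Py_DEBUG.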
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
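# Timestamp (and optional load average) prefix of regrtest log lines,
# e.g. "0:00:01 load avg: 0.12 ".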
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'

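# Exit codes regrtest is expected to return; 130 follows the usual shell
# convention of 128 + signal number (SIGINT is signal 2).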
EXITCODE_BAD_TEST = 2
EXITCODE_ENV_CHANGED = 3
EXITCODE_NO_TESTS_RAN = 4
EXITCODE_INTERRUPTED = 130

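# Code injected into generated test modules to simulate a SIGINT (Ctrl+C)
# arriving while the test runs.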
TEST_INTERRUPTED = textwrap.dedent("""
    from signal import SIGINT
    try:
        from signal import raise_signal
        raise_signal(SIGINT)
    except ImportError:
        # raise_signal() was only added in Python 3.8
        import os
        os.kill(os.getpid(), SIGINT)
    """)


class ParseArgsTestCase(unittest.TestCase):
    """
    Test regrtest's argument parsing, function _parse_args().
    """

    def checkError(self, args, msg):
        with support.captured_stderr() as err, self.assertRaises(SystemExit):
            libregrtest._parse_args(args)
        self.assertIn(msg, err.getvalue())

    def test_help(self):
        for opt in '-h', '--help':
            with self.subTest(opt=opt):
                with support.captured_stdout() as out, \
                     self.assertRaises(SystemExit):
                    libregrtest._parse_args([opt])
                self.assertIn('Run Python regression tests.', out.getvalue())

    def test_timeout(self):
        ns = libregrtest._parse_args(['--timeout', '4.2'])
        self.assertEqual(ns.timeout, 4.2)
        self.checkError(['--timeout'], 'expected one argument')
        self.checkError(['--timeout', 'foo'], 'invalid float value')

    def test_wait(self):
        ns = libregrtest._parse_args(['--wait'])
        self.assertTrue(ns.wait)

    def test_worker_args(self):
        ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
        self.assertEqual(ns.worker_args, '[[], {}]')
        self.checkError(['--worker-args'], 'expected one argument')

    def test_start(self):
        for opt in '-S', '--start':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.start, 'foo')
                self.checkError([opt], 'expected one argument')

    def test_verbose(self):
        ns = libregrtest._parse_args(['-v'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['-vvv'])
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args(['--verbose'])
        self.assertEqual(ns.verbose, 1)
        ns = libregrtest._parse_args(['--verbose'] * 3)
        self.assertEqual(ns.verbose, 3)
        ns = libregrtest._parse_args([])
        self.assertEqual(ns.verbose, 0)

    def test_verbose2(self):
        for opt in '-w', '--verbose2':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose2)

    def test_verbose3(self):
        for opt in '-W', '--verbose3':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.verbose3)

    def test_quiet(self):
        for opt in '-q', '--quiet':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.quiet)
                self.assertEqual(ns.verbose, 0)

    def test_slowest(self):
        for opt in '-o', '--slowest':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.print_slow)

    def test_header(self):
        ns = libregrtest._parse_args(['--header'])
        self.assertTrue(ns.header)

        ns = libregrtest._parse_args(['--verbose'])
        self.assertTrue(ns.header)

    def test_randomize(self):
        for opt in '-r', '--randomize':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.randomize)

    def test_randseed(self):
        ns = libregrtest._parse_args(['--randseed', '12345'])
        self.assertEqual(ns.random_seed, 12345)
        self.assertTrue(ns.randomize)
        self.checkError(['--randseed'], 'expected one argument')
        self.checkError(['--randseed', 'foo'], 'invalid int value')

    def test_fromfile(self):
        for opt in '-f', '--fromfile':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.fromfile, 'foo')
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo', '-s'], "don't go together")

    def test_exclude(self):
        for opt in '-x', '--exclude':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.exclude)

    def test_single(self):
        for opt in '-s', '--single':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.single)
                self.checkError([opt, '-f', 'foo'], "don't go together")

    def test_ignore(self):
        for opt in '-i', '--ignore':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.ignore_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--ignorefile', filename])
        self.assertEqual(ns.ignore_tests,
                         ['matchfile1', 'matchfile2'])

    def test_match(self):
        for opt in '-m', '--match':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'pattern'])
                self.assertEqual(ns.match_tests, ['pattern'])
                self.checkError([opt], 'expected one argument')

        ns = libregrtest._parse_args(['-m', 'pattern1',
                                      '-m', 'pattern2'])
        self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])

        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, "w") as fp:
            print('matchfile1', file=fp)
            print('matchfile2', file=fp)

        filename = os.path.abspath(os_helper.TESTFN)
        ns = libregrtest._parse_args(['-m', 'match',
                                      '--matchfile', filename])
        self.assertEqual(ns.match_tests,
                         ['match', 'matchfile1', 'matchfile2'])

    def test_failfast(self):
        for opt in '-G', '--failfast':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '-v'])
                self.assertTrue(ns.failfast)
                ns = libregrtest._parse_args([opt, '-W'])
                self.assertTrue(ns.failfast)
                self.checkError([opt], '-G/--failfast needs either -v or -W')

    def test_use(self):
        for opt in '-u', '--use':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'gui,network'])
                self.assertEqual(ns.use_resources, ['gui', 'network'])

                ns = libregrtest._parse_args([opt, 'gui,none,network'])
                self.assertEqual(ns.use_resources, ['network'])

                expected = list(libregrtest.ALL_RESOURCES)
                expected.remove('gui')
                ns = libregrtest._parse_args([opt, 'all,-gui'])
                self.assertEqual(ns.use_resources, expected)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid resource')

                # all + a resource not part of "all"
                ns = libregrtest._parse_args([opt, 'all,tzdata'])
                self.assertEqual(ns.use_resources,
                                 list(libregrtest.ALL_RESOURCES) + ['tzdata'])

                # test another resource which is not part of "all"
                ns = libregrtest._parse_args([opt, 'extralargefile'])
                self.assertEqual(ns.use_resources, ['extralargefile'])

    def test_memlimit(self):
        for opt in '-M', '--memlimit':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '4G'])
                self.assertEqual(ns.memlimit, '4G')
                self.checkError([opt], 'expected one argument')

    def test_testdir(self):
        ns = libregrtest._parse_args(['--testdir', 'foo'])
        self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
        self.checkError(['--testdir'], 'expected one argument')

    def test_runleaks(self):
        for opt in '-L', '--runleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.runleaks)

    def test_huntrleaks(self):
        for opt in '-R', '--huntrleaks':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, ':'])
                self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:'])
                self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, ':3'])
                self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
                ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
                self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, '6'],
                                'needs 2 or 3 colon-separated arguments')
                self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
                self.checkError([opt, '6:foo'], 'invalid huntrleaks value')

    def test_multiprocess(self):
        for opt in '-j', '--multiprocess':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '2'])
                self.assertEqual(ns.use_mp, 2)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')
                self.checkError([opt, '2', '-T'], "don't go together")
                self.checkError([opt, '0', '-T'], "don't go together")

    def test_coverage(self):
        for opt in '-T', '--coverage':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.trace)

    def test_coverdir(self):
        for opt in '-D', '--coverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, 'foo'])
                self.assertEqual(ns.coverdir,
                                 os.path.join(os_helper.SAVEDCWD, 'foo'))
                self.checkError([opt], 'expected one argument')

    def test_nocoverdir(self):
        for opt in '-N', '--nocoverdir':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertIsNone(ns.coverdir)

    def test_threshold(self):
        for opt in '-t', '--threshold':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt, '1000'])
                self.assertEqual(ns.threshold, 1000)
                self.checkError([opt], 'expected one argument')
                self.checkError([opt, 'foo'], 'invalid int value')

    def test_nowindows(self):
        for opt in '-n', '--nowindows':
            with self.subTest(opt=opt):
                with contextlib.redirect_stderr(io.StringIO()) as stderr:
                    ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.nowindows)
                err = stderr.getvalue()
                self.assertIn('the --nowindows (-n) option is deprecated', err)

    def test_forever(self):
        for opt in '-F', '--forever':
            with self.subTest(opt=opt):
                ns = libregrtest._parse_args([opt])
                self.assertTrue(ns.forever)

    def test_unrecognized_argument(self):
        self.checkError(['--xxx'], 'usage:')

    def test_long_option__partial(self):
        ns = libregrtest._parse_args(['--qui'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)

    def test_two_options(self):
        ns = libregrtest._parse_args(['--quiet', '--exclude'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertTrue(ns.exclude)

    def test_option_with_empty_string_value(self):
        ns = libregrtest._parse_args(['--start', ''])
        self.assertEqual(ns.start, '')

    def test_arg(self):
        ns = libregrtest._parse_args(['foo'])
        self.assertEqual(ns.args, ['foo'])

    def test_option_and_arg(self):
        ns = libregrtest._parse_args(['--quiet', 'foo'])
        self.assertTrue(ns.quiet)
        self.assertEqual(ns.verbose, 0)
        self.assertEqual(ns.args, ['foo'])

    def test_arg_option_arg(self):
        ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
        self.assertEqual(ns.verbose, 1)
        self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])

    def test_unknown_option(self):
        self.checkError(['--unknown-option'],
                        'unrecognized arguments: --unknown-option')


class BaseTestCase(unittest.TestCase):
    TEST_UNIQUE_ID = 1
    TESTNAME_PREFIX = 'test_regrtest_'
    TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'

    def setUp(self):
        self.testdir = os.path.realpath(os.path.dirname(__file__))

        self.tmptestdir = tempfile.mkdtemp()
        self.addCleanup(os_helper.rmtree, self.tmptestdir)

    def create_test(self, name=None, code=None):
        if not name:
            name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
            BaseTestCase.TEST_UNIQUE_ID += 1

        if code is None:
            code = textwrap.dedent("""
                    import unittest

                    class Tests(unittest.TestCase):
                        def test_empty_test(self):
                            pass
                """)

        # test_regrtest cannot be run twice in parallel because
        # of setUp() and create_test()
        name = self.TESTNAME_PREFIX + name
        path = os.path.join(self.tmptestdir, name + '.py')

        self.addCleanup(os_helper.unlink, path)
        # Use 'x' mode to ensure that we do not overwrite existing tests
        try:
            with open(path, 'x', encoding='utf-8') as fp:
                fp.write(code)
        except PermissionError as exc:
            if not sysconfig.is_python_build():
                self.skipTest("cannot write %s: %s" % (path, exc))
            raise
        return name

    def regex_search(self, regex, output):
        match = re.search(regex, output, re.MULTILINE)
        if not match:
            self.fail("%r not found in %r" % (regex, output))
        return match

    def check_line(self, output, regex):
        regex = re.compile(r'^' + regex, re.MULTILINE)
        self.assertRegex(output, regex)

    def parse_executed_tests(self, output):
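        # Each executed test is logged with a progress counter, e.g.
        # "0:00:01 load avg: 0.12 [ 1/10] test_regrtest_noop1";
        # extract the test names from such lines.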
        regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                 % (LOG_PREFIX, self.TESTNAME_REGEX))
        parser = re.finditer(regex, output, re.MULTILINE)
        return list(match.group(1) for match in parser)

    def check_executed_tests(self, output, tests, skipped=(), failed=(),
                             env_changed=(), omitted=(),
                             rerun={}, no_test_ran=(),
                             randomize=False, interrupted=False,
                             fail_env_changed=False):
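        """Check which tests were executed and how they are summarized.

        tests lists the tests expected to run (in order, unless randomize
        is true); the other arguments name the tests expected in each
        result category of the summary, which looks roughly like:

            1 test OK.

            1 test failed:
                test_regrtest_failing

            Tests result: FAILURE
        """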
        if isinstance(tests, str):
            tests = [tests]
        if isinstance(skipped, str):
            skipped = [skipped]
        if isinstance(failed, str):
            failed = [failed]
        if isinstance(env_changed, str):
            env_changed = [env_changed]
        if isinstance(omitted, str):
            omitted = [omitted]
        if isinstance(no_test_ran, str):
            no_test_ran = [no_test_ran]

        executed = self.parse_executed_tests(output)
        if randomize:
            self.assertEqual(set(executed), set(tests), output)
        else:
            self.assertEqual(executed, tests, output)

        def plural(count):
            return 's' if count != 1 else ''

        def list_regex(line_format, tests):
            count = len(tests)
            names = ' '.join(sorted(tests))
            regex = line_format % (count, plural(count))
            regex = r'%s:\n    %s$' % (regex, names)
            return regex

        if skipped:
            regex = list_regex('%s test%s skipped', skipped)
            self.check_line(output, regex)

        if failed:
            regex = list_regex('%s test%s failed', failed)
            self.check_line(output, regex)

        if env_changed:
            regex = list_regex('%s test%s altered the execution environment',
                               env_changed)
            self.check_line(output, regex)

        if omitted:
            regex = list_regex('%s test%s omitted', omitted)
            self.check_line(output, regex)

        if rerun:
            regex = list_regex('%s re-run test%s', rerun.keys())
            self.check_line(output, regex)
            regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
            self.check_line(output, regex)
            for name, match in rerun.items():
                regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
                self.check_line(output, regex)

        if no_test_ran:
            regex = list_regex('%s test%s run no tests', no_test_ran)
            self.check_line(output, regex)

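        # Whatever does not fall into another result category must have
        # passed ("good").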
        good = (len(tests) - len(skipped) - len(failed)
                - len(omitted) - len(env_changed) - len(no_test_ran))
        if good:
            regex = r'%s test%s OK\.$' % (good, plural(good))
            if not skipped and not failed and good > 1:
                regex = 'All %s' % regex
            self.check_line(output, regex)

        if interrupted:
            self.check_line(output, 'Test suite interrupted by signal SIGINT.')

        result = []
        if failed:
            result.append('FAILURE')
        elif fail_env_changed and env_changed:
            result.append('ENV CHANGED')
        if interrupted:
            result.append('INTERRUPTED')
        if not any((good, result, failed, interrupted, skipped,
                    env_changed, fail_env_changed)):
            result.append("NO TEST RUN")
        elif not result:
            result.append('SUCCESS')
        result = ', '.join(result)
        if rerun:
            self.check_line(output, 'Tests result: FAILURE')
            result = 'FAILURE then %s' % result

        self.check_line(output, 'Tests result: %s' % result)

    def parse_random_seed(self, output):
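        # With -r/--randomize, regrtest logs a line like
        # "Using random seed 7".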
        match = self.regex_search(r'Using random seed ([0-9]+)', output)
        randseed = int(match.group(1))
        self.assertTrue(0 <= randseed <= 10000000, randseed)
        return randseed

    def run_command(self, args, input=None, exitcode=0, **kw):
        if not input:
            input = ''
        if 'stderr' not in kw:
            kw['stderr'] = subprocess.STDOUT
        proc = subprocess.run(args,
                              universal_newlines=True,
                              input=input,
                              stdout=subprocess.PIPE,
                              **kw)
        if proc.returncode != exitcode:
            msg = ("Command %s failed with exit code %s\n"
                   "\n"
                   "stdout:\n"
                   "---\n"
                   "%s\n"
                   "---\n"
                   % (str(args), proc.returncode, proc.stdout))
            if proc.stderr:
                msg += ("\n"
                        "stderr:\n"
                        "---\n"
                        "%s"
                        "---\n"
                        % proc.stderr)
            self.fail(msg)
        return proc

    def run_python(self, args, **kw):
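        # -X faulthandler dumps the traceback if a test process crashes or
        # hangs; -I runs Python in isolated mode, ignoring environment
        # variables and the user site directory.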
        args = [sys.executable, '-X', 'faulthandler', '-I', *args]
        proc = self.run_command(args, **kw)
        return proc.stdout


class CheckActualTests(BaseTestCase):
    def test_finds_expected_number_of_tests(self):
        """
        Check that regrtest appears to find the expected set of tests.
        """
        args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
        output = self.run_python(args)
        rough_number_of_tests_found = len(output.splitlines())
        actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
                                             'test*.py')
        rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
        # We're not trying to duplicate test finding logic in here,
        # just give a rough estimate of how many there should be and
        # be near that.  This is a regression test to prevent mishaps
        # such as https://bugs.python.org/issue37667 in the future.
        # If you need to change the values in here during some
        # mythical future test suite reorganization, don't go
        # overboard with logic and keep that goal in mind.
        self.assertGreater(rough_number_of_tests_found,
                           rough_counted_test_py_files * 9 // 10,
                           msg='Unexpectedly low number of tests found in:\n'
                           f'{", ".join(output.splitlines())}')


class ProgramsTestCase(BaseTestCase):
    """
    Test various ways to run the Python test suite. Use options close
    to those used on the buildbot.
    """

    NTEST = 4

    def setUp(self):
        super().setUp()

        # Create NTEST tests doing nothing
        self.tests = [self.create_test() for index in range(self.NTEST)]

        self.python_args = ['-Wd', '-E', '-bb']
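        # -u all: enable all resources; -r: randomize test order;
        # -w: re-run failed tests in verbose mode; -W: display test output
        # on failure.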
        self.regrtest_args = ['-uall', '-rwW',
                              '--testdir=%s' % self.tmptestdir]
        self.regrtest_args.extend(('--timeout', '3600', '-j4'))
        if sys.platform == 'win32':
            self.regrtest_args.append('-n')

    def check_output(self, output):
        self.parse_random_seed(output)
        self.check_executed_tests(output, self.tests, randomize=True)

    def run_tests(self, args):
        output = self.run_python(args)
        self.check_output(output)

    def test_script_regrtest(self):
        # Lib/test/regrtest.py
        script = os.path.join(self.testdir, 'regrtest.py')

        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_test(self):
        # -m test
        args = [*self.python_args, '-m', 'test',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_regrtest(self):
        # -m test.regrtest
        args = [*self.python_args, '-m', 'test.regrtest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_autotest(self):
        # -m test.autotest
        args = [*self.python_args, '-m', 'test.autotest',
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_module_from_test_autotest(self):
        # from test import autotest
        code = 'from test import autotest'
        args = [*self.python_args, '-c', code,
                *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def test_script_autotest(self):
        # Lib/test/autotest.py
        script = os.path.join(self.testdir, 'autotest.py')
        args = [*self.python_args, script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'run_tests.py script is not installed')
    def test_tools_script_run_tests(self):
        # Tools/scripts/run_tests.py
        script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
        args = [script, *self.regrtest_args, *self.tests]
        self.run_tests(args)

    def run_batch(self, *args):
        proc = self.run_command(args)
        self.check_output(proc.stdout)

    @unittest.skipUnless(sysconfig.is_python_build(),
                         'test.bat script is not installed')
    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_tools_buildbot_test(self):
        # Tools\buildbot\test.bat
        script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
        test_args = ['--testdir=%s' % self.tmptestdir]
        if platform.machine() == 'ARM64':
            test_args.append('-arm64') # ARM 64-bit build
        elif platform.machine() == 'ARM':
            test_args.append('-arm32')   # 32-bit ARM build
        elif platform.architecture()[0] == '64bit':
            test_args.append('-x64')   # 64-bit build
        if not Py_DEBUG:
            test_args.append('+d')     # Release build, use python.exe
        self.run_batch(script, *test_args, *self.tests)

    @unittest.skipUnless(sys.platform == 'win32', 'Windows only')
    def test_pcbuild_rt(self):
        # PCbuild\rt.bat
        script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
        if not os.path.isfile(script):
            self.skipTest(f'File "{script}" does not exist')
        rt_args = ["-q"]             # Quick, don't run tests twice
        if platform.machine() == 'ARM64':
            rt_args.append('-arm64') # ARM 64-bit build
        elif platform.machine() == 'ARM':
            rt_args.append('-arm32')   # 32-bit ARM build
        elif platform.architecture()[0] == '64bit':
            rt_args.append('-x64')   # 64-bit build
        if Py_DEBUG:
            rt_args.append('-d')     # Debug build, use python_d.exe
        self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)


class ArgsTestCase(BaseTestCase):
    """
    Test arguments of the Python test suite.
    """

    def run_tests(self, *testargs, **kw):
        cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
        return self.run_python(cmdargs, **kw)

    def test_failing_test(self):
        # test a failing test
        code = textwrap.dedent("""
            import unittest

            class FailingTest(unittest.TestCase):
                def test_failing(self):
                    self.fail("bug")
        """)
        test_ok = self.create_test('ok')
        test_failing = self.create_test('failing', code=code)
        tests = [test_ok, test_failing]

        output = self.run_tests(*tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=test_failing)

    def test_resources(self):
        # test -u command line option
        tests = {}
        for resource in ('audio', 'network'):
            code = textwrap.dedent("""
                        from test import support; support.requires(%r)
                        import unittest
                        class PassingTest(unittest.TestCase):
                            def test_pass(self):
                                pass
                    """ % resource)

            tests[resource] = self.create_test(resource, code)
        test_names = sorted(tests.values())

        # -u all: 2 resources enabled
        output = self.run_tests('-u', 'all', *test_names)
        self.check_executed_tests(output, test_names)

        # -u audio: 1 resource enabled
        output = self.run_tests('-uaudio', *test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=tests['network'])

        # no option: 0 resources enabled
        output = self.run_tests(*test_names)
        self.check_executed_tests(output, test_names,
                                  skipped=test_names)

    def test_random(self):
        # test -r and --randseed command line option
        code = textwrap.dedent("""
            import random
            print("TESTRANDOM: %s" % random.randint(1, 1000))
        """)
        test = self.create_test('random', code)

        # first run to get the output with the random seed
        output = self.run_tests('-r', test)
        randseed = self.parse_random_seed(output)
        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random = int(match.group(1))

        # try to reproduce with the random seed
        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
        randseed2 = self.parse_random_seed(output)
        self.assertEqual(randseed2, randseed)

        match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
        test_random2 = int(match.group(1))
        self.assertEqual(test_random2, test_random)

    def test_fromfile(self):
        # test --fromfile
        tests = [self.create_test() for index in range(5)]

        # Write the list of files using a format similar to regrtest output:
        # [1/2] test_1
        # [2/2] test_2
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        # test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
        with open(filename, "w") as fp:
            previous = None
            for index, name in enumerate(tests, 1):
                line = ("00:00:%02i [%s/%s] %s"
                        % (index, index, len(tests), name))
                if previous:
                    line += " -- %s took 0 sec" % previous
                print(line, file=fp)
                previous = name

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format '[2/7] test_opcodes'
        with open(filename, "w") as fp:
            for index, name in enumerate(tests, 1):
                print("[%s/%s] %s" % (index, len(tests), name), file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'test_opcodes'
        with open(filename, "w") as fp:
            for name in tests:
                print(name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

        # test format 'Lib/test/test_opcodes.py'
        with open(filename, "w") as fp:
            for name in tests:
                print('Lib/test/%s.py' % name, file=fp)

        output = self.run_tests('--fromfile', filename)
        self.check_executed_tests(output, tests)

    def test_interrupted(self):
        code = TEST_INTERRUPTED
        test = self.create_test('sigint', code=code)
        output = self.run_tests(test, exitcode=130)
        self.check_executed_tests(output, test, omitted=test,
                                  interrupted=True)

    def test_slowest(self):
        # test --slowest
        tests = [self.create_test() for index in range(3)]
        output = self.run_tests("--slowest", *tests)
        self.check_executed_tests(output, tests)
        regex = ('10 slowest tests:\n'
                 '(?:- %s: .*\n){%s}'
                 % (self.TESTNAME_REGEX, len(tests)))
        self.check_line(output, regex)

    def test_slowest_interrupted(self):
        # Issue #25373: test --slowest with an interrupted test
        code = TEST_INTERRUPTED
        test = self.create_test("sigint", code=code)

        for multiprocessing in (False, True):
            with self.subTest(multiprocessing=multiprocessing):
                if multiprocessing:
                    args = ("--slowest", "-j2", test)
                else:
                    args = ("--slowest", test)
                output = self.run_tests(*args, exitcode=130)
                self.check_executed_tests(output, test,
                                          omitted=test, interrupted=True)

                regex = '10 slowest tests:\n'
                self.check_line(output, regex)

    def test_coverage(self):
        # test --coverage
        test = self.create_test('coverage')
        output = self.run_tests("--coverage", test)
        self.check_executed_tests(output, [test])
        regex = (r'lines +cov% +module +\(path\)\n'
                 r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
        self.check_line(output, regex)

    def test_wait(self):
        # test --wait
        test = self.create_test('wait')
        output = self.run_tests("--wait", test, input='key')
        self.check_line(output, 'Press any key to continue')

    def test_forever(self):
        # test --forever
        code = textwrap.dedent("""
            import builtins
            import unittest

            class ForeverTester(unittest.TestCase):
                def test_run(self):
                    # Store the state in the builtins module, because the
                    # test module is reloaded for each run
                    if 'RUN' in builtins.__dict__:
                        builtins.__dict__['RUN'] += 1
                        if builtins.__dict__['RUN'] >= 3:
                            self.fail("fail at the 3rd run")
                    else:
                        builtins.__dict__['RUN'] = 1
        """)
        test = self.create_test('forever', code=code)
        output = self.run_tests('--forever', test, exitcode=2)
        self.check_executed_tests(output, [test]*3, failed=test)

    def check_leak(self, code, what):
        test = self.create_test('huntrleaks', code=code)

        filename = 'reflog.txt'
        self.addCleanup(os_helper.unlink, filename)
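        # --huntrleaks 3:3: means 3 warmup runs followed by 3 tracked runs
        # per test (6 repetitions in total), writing leaks to the default
        # reflog.txt file.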
        output = self.run_tests('--huntrleaks', '3:3:', test,
                                exitcode=2,
                                stderr=subprocess.STDOUT)
        self.check_executed_tests(output, [test], failed=test)

        line = 'beginning 6 repetitions\n123456\n......\n'
        self.check_line(output, re.escape(line))

        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
        self.assertIn(line2, output)

        with open(filename) as fp:
            reflog = fp.read()
            self.assertIn(line2, reflog)

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks(self):
        # test --huntrleaks
        code = textwrap.dedent("""
            import unittest

            GLOBAL_LIST = []

            class RefLeakTest(unittest.TestCase):
                def test_leak(self):
                    GLOBAL_LIST.append(object())
        """)
        self.check_leak(code, 'references')

    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
    def test_huntrleaks_fd_leak(self):
        # test --huntrleaks for file descriptor leak
        code = textwrap.dedent("""
            import os
            import unittest

            class FDLeakTest(unittest.TestCase):
                def test_leak(self):
                    fd = os.open(__file__, os.O_RDONLY)
                    # bug: never close the file descriptor
        """)
        self.check_leak(code, 'file descriptors')

    def test_list_tests(self):
        # test --list-tests
        tests = [self.create_test() for i in range(5)]
        output = self.run_tests('--list-tests', *tests)
        self.assertEqual(output.rstrip().splitlines(),
                         tests)

    def test_list_cases(self):
        # test --list-cases
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
        """)
        testname = self.create_test(code=code)

        # Test --list-cases
        all_methods = ['%s.Tests.test_method1' % testname,
                       '%s.Tests.test_method2' % testname]
        output = self.run_tests('--list-cases', testname)
        self.assertEqual(output.splitlines(), all_methods)

        # Test --list-cases with --match
        all_methods = ['%s.Tests.test_method1' % testname]
        output = self.run_tests('--list-cases',
                                '-m', 'test_method1',
                                testname)
        self.assertEqual(output.splitlines(), all_methods)

    @support.cpython_only
    def test_crashed(self):
        # Any code which causes a crash
        code = 'import faulthandler; faulthandler._sigsegv()'
        crash_test = self.create_test(name="crash", code=code)

        tests = [crash_test]
        output = self.run_tests("-j2", *tests, exitcode=2)
        self.check_executed_tests(output, tests, failed=crash_test,
                                  randomize=True)

    def parse_methods(self, output):
        regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
        return [match.group(1) for match in regex.finditer(output)]

    def test_ignorefile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        subset = [
            # only ignore the method name
            'test_method1',
            # ignore the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--ignorefile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method2', 'test_method4']
        self.assertEqual(methods, subset)

    def test_matchfile(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_method1(self):
                    pass
                def test_method2(self):
                    pass
                def test_method3(self):
                    pass
                def test_method4(self):
                    pass
        """)
        all_methods = ['test_method1', 'test_method2',
                       'test_method3', 'test_method4']
        testname = self.create_test(code=code)

        # by default, all methods should be run
        output = self.run_tests("-v", testname)
        methods = self.parse_methods(output)
        self.assertEqual(methods, all_methods)

        # only run a subset
        filename = os_helper.TESTFN
        self.addCleanup(os_helper.unlink, filename)

        subset = [
            # only match the method name
            'test_method1',
            # match the full identifier
            '%s.Tests.test_method3' % testname]
        with open(filename, "w") as fp:
            for name in subset:
                print(name, file=fp)

        output = self.run_tests("-v", "--matchfile", filename, testname)
        methods = self.parse_methods(output)
        subset = ['test_method1', 'test_method3']
        self.assertEqual(methods, subset)

    def test_env_changed(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_env_changed(self):
                    open("env_changed", "w").close()
        """)
        testname = self.create_test(code=code)

        # don't fail by default
        output = self.run_tests(testname)
        self.check_executed_tests(output, [testname], env_changed=testname)

        # fail with --fail-env-changed
        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname], env_changed=testname,
                                  fail_env_changed=True)

    def test_rerun_fail(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_always(self):
                    # test that always fails
                    self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname, rerun={testname: "test_fail_always"})

    def test_rerun_success(self):
        # FAILURE then SUCCESS
        code = textwrap.dedent("""
            import builtins
            import unittest

            class Tests(unittest.TestCase):
                def test_succeed(self):
                    return

                def test_fail_once(self):
                    if not hasattr(builtins, '_test_failed'):
                        builtins._test_failed = True
                        self.fail("bug")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=0)
        self.check_executed_tests(output, [testname],
                                  rerun={testname: "test_fail_once"})

    def test_rerun_setup_class_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def setUpClass(cls):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "ExampleTests"})

    def test_rerun_teardown_class_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                @classmethod
                def tearDownClass(cls):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "ExampleTests"})

    def test_rerun_setup_module_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            def setUpModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: testname})

    def test_rerun_teardown_module_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            def tearDownModule():
                raise RuntimeError('Fail')

            class ExampleTests(unittest.TestCase):
                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: testname})

    def test_rerun_setup_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def setUp(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "test_success"})

    def test_rerun_teardown_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.TestCase):
                def tearDown(self):
                    raise RuntimeError('Fail')

                def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "test_success"})

    def test_rerun_async_setup_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncSetUp(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "test_success"})

    def test_rerun_async_teardown_hook_failure(self):
        # FAILURE then FAILURE
        code = textwrap.dedent("""
            import unittest

            class ExampleTests(unittest.IsolatedAsyncioTestCase):
                async def asyncTearDown(self):
                    raise RuntimeError('Fail')

                async def test_success(self):
                    return
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
        self.check_executed_tests(output, testname,
                                  failed=[testname],
                                  rerun={testname: "test_success"})

    def test_no_tests_ran(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname], no_test_ran=testname)

    def test_no_tests_ran_skip(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_skipped(self):
                    self.skipTest("because")
        """)
        testname = self.create_test(code=code)

        output = self.run_tests(testname, exitcode=0)
        self.check_executed_tests(output, [testname])

    def test_no_tests_ran_multiple_tests_nonexistent(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        testname2 = self.create_test(code=code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname, testname2])

    def test_no_test_ran_some_test_exist_some_not(self):
        code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_bug(self):
                    pass
        """)
        testname = self.create_test(code=code)
        other_code = textwrap.dedent("""
            import unittest

            class Tests(unittest.TestCase):
                def test_other_bug(self):
                    pass
        """)
        testname2 = self.create_test(code=other_code)

        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
                                "-m", "test_other_bug", exitcode=0)
        self.check_executed_tests(output, [testname, testname2],
                                  no_test_ran=[testname])

    @support.cpython_only
    def test_uncollectable(self):
        code = textwrap.dedent(r"""
            import _testcapi
            import gc
            import unittest

            @_testcapi.with_tp_del
            class Garbage:
                def __tp_del__(self):
                    pass

            class Tests(unittest.TestCase):
                def test_garbage(self):
                    # create an uncollectable object
                    obj = Garbage()
                    obj.ref_cycle = obj
                    obj = None
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)

    def test_multiprocessing_timeout(self):
        code = textwrap.dedent(r"""
            import time
            import unittest
            try:
                import faulthandler
            except ImportError:
                faulthandler = None

            class Tests(unittest.TestCase):
                # test hangs and so should be stopped by the timeout
                def test_sleep(self):
                    # we want to test regrtest multiprocessing timeout,
                    # not faulthandler timeout
                    if faulthandler is not None:
                        faulthandler.cancel_dump_traceback_later()

                    time.sleep(60 * 5)
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
        self.check_executed_tests(output, [testname],
                                  failed=testname)
        self.assertRegex(output,
                         re.compile('%s timed out' % testname, re.MULTILINE))

    def test_unraisable_exc(self):
        # --fail-env-changed must catch unraisable exception.
        # The exception must be displayed even if sys.stderr is redirected.
        code = textwrap.dedent(r"""
            import unittest
            import weakref
            from test.support import captured_stderr

            class MyObject:
                pass

            def weakref_callback(obj):
                raise Exception("weakref callback bug")

            class Tests(unittest.TestCase):
                def test_unraisable_exc(self):
                    obj = MyObject()
                    ref = weakref.ref(obj, weakref_callback)
                    with captured_stderr() as stderr:
                        # call weakref_callback() which logs
                        # an unraisable exception
                        obj = None
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)
        self.assertIn("Warning -- Unraisable exception", output)
        self.assertIn("Exception: weakref callback bug", output)

    def test_threading_excepthook(self):
        # --fail-env-changed must catch uncaught thread exception.
        # The exception must be displayed even if sys.stderr is redirected.
        code = textwrap.dedent(r"""
            import threading
            import unittest
            from test.support import captured_stderr

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_threading_excepthook(self):
                    with captured_stderr() as stderr:
                        thread = threading.Thread(target=func_bug)
                        thread.start()
                        thread.join()
                    self.assertEqual(stderr.getvalue(), '')
        """)
        testname = self.create_test(code=code)

        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
        self.check_executed_tests(output, [testname],
                                  env_changed=[testname],
                                  fail_env_changed=True)
        self.assertIn("Warning -- Uncaught thread exception", output)
        self.assertIn("Exception: bug in thread", output)

    def test_print_warning(self):
        # bpo-45410: The order of messages must be preserved when -W and
        # support.print_warning() are used.
        code = textwrap.dedent(r"""
            import sys
            import unittest
            from test import support

            class MyObject:
                pass

            def func_bug():
                raise Exception("bug in thread")

            class Tests(unittest.TestCase):
                def test_print_warning(self):
                    print("msg1: stdout")
                    support.print_warning("msg2: print_warning")
                    # Fail with ENV CHANGED to see print_warning() log
                    support.environment_altered = True
        """)
        testname = self.create_test(code=code)

        # Expect an output like:
        #
        #   test_print_warning (test.test_x.Tests) ... msg1: stdout
        #   Warning -- msg2: print_warning
        #   ok
        regex = (r"test_print_warning.*msg1: stdout\n"
                 r"Warning -- msg2: print_warning\n"
                 r"ok\n")
        for option in ("-v", "-W"):
            with self.subTest(option=option):
                cmd = ["--fail-env-changed", option, testname]
                output = self.run_tests(*cmd, exitcode=3)
                self.check_executed_tests(output, [testname],
                                          env_changed=[testname],
                                          fail_env_changed=True)
                self.assertRegex(output, regex)

    def test_unicode_guard_env(self):
        guard = os.environ.get(setup.UNICODE_GUARD_ENV)
        self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
        if guard.isascii():
            # Skip to signify that the env var value was changed by the user;
            # possibly to something ASCII to work around Unicode issues.
            self.skipTest("Modified guard")

    def test_cleanup(self):
        dirname = os.path.join(self.tmptestdir, "test_python_123")
        os.mkdir(dirname)
        filename = os.path.join(self.tmptestdir, "test_python_456")
        open(filename, "wb").close()
        names = [dirname, filename]

        cmdargs = ['-m', 'test',
                   '--tempdir=%s' % self.tmptestdir,
                   '--cleanup']
        self.run_python(cmdargs)

        for name in names:
            self.assertFalse(os.path.exists(name), name)


class TestUtils(unittest.TestCase):
    def test_format_duration(self):
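        # format_duration() keeps at most the two most significant units,
        # and any non-zero duration below one millisecond is reported
        # as '1 ms'.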
        self.assertEqual(utils.format_duration(0),
                         '0 ms')
        self.assertEqual(utils.format_duration(1e-9),
                         '1 ms')
        self.assertEqual(utils.format_duration(10e-3),
                         '10 ms')
        self.assertEqual(utils.format_duration(1.5),
                         '1.5 sec')
        self.assertEqual(utils.format_duration(1),
                         '1.0 sec')
        self.assertEqual(utils.format_duration(2 * 60),
                         '2 min')
        self.assertEqual(utils.format_duration(2 * 60 + 1),
                         '2 min 1 sec')
        self.assertEqual(utils.format_duration(3 * 3600),
                         '3 hour')
        self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
                         '3 hour 2 min')
        self.assertEqual(utils.format_duration(3 * 3600 + 1),
                         '3 hour 1 sec')


if __name__ == '__main__':
    unittest.main()