# Lint as: python2, python3
# Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import threading

import logging
# Turn the logging level to INFO before importing other autotest
# code, to avoid having failed import logging messages confuse the
# test_that user.
logging.basicConfig(level=logging.INFO)

import common
from autotest_lib.client.common_lib.cros import retry
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.server.cros.dynamic_suite import suite, constants
from autotest_lib.server.hosts import factory
from autotest_lib.server.hosts import file_store
from autotest_lib.server.hosts import host_info
from autotest_lib.server import autoserv_utils
from autotest_lib.server import server_logging_config
from autotest_lib.server import utils


_autoserv_proc = None
_sigint_handler_lock = threading.Lock()

_AUTOSERV_SIGINT_TIMEOUT_SECONDS = 5
NO_BOARD = 'ad_hoc_board'
NO_BUILD = 'ad_hoc_build'
NO_MODEL = 'ad_hoc_model'
_SUITE_REGEX = r'suite:(.*)'

_TEST_KEY_FILENAME = 'testing_rsa'
TEST_KEY_PATH = ('/mnt/host/source/src/scripts/mod_for_test_scripts/'
                 'ssh_keys/%s' % _TEST_KEY_FILENAME)

_LATEST_RESULTS_DIRECTORY = '/tmp/test_that_latest'
_HOST_INFO_SUBDIR = 'host_info_store'


class TestThatRunError(Exception):
    """Raised if test_that encounters something unexpected while running."""


class TestThatProvisioningError(Exception):
    """Raised when provisioning the DUT to the requested build fails."""


class TestThatControlError(Exception):
    """Raised when there is an issue with the specified test's control file."""


def add_common_args(parser):
    """
    Add common arguments for both test_that and test_droid to their parser.

    @param parser: argparse.ArgumentParser object to add arguments to.
    """
    parser.add_argument('tests', nargs='+', metavar='TEST',
                        help='Run given test(s). Use suite:SUITE to specify '
                             'test suite. Use e:[NAME_PATTERN] to specify a '
                             'NAME-matching regular expression. Use '
                             'f:[FILE_PATTERN] to specify a filename matching '
                             'regular expression. Specified regular '
                             'expressions will be implicitly wrapped in '
                             '^ and $.')
    parser.add_argument('--fast', action='store_true', dest='fast_mode',
                        default=False,
                        help='Enable fast mode.  This will cause test_droid '
                             'to skip time consuming steps like sysinfo and '
                             'collecting crash information.')
    parser.add_argument('--args', metavar='ARGS',
                        help='Whitespace separated argument string to pass '
                             'through to test. Only supported for runs '
                             'against a local DUT. '
                             "e.g. --args='foo=bar cat=\"in a hat\"'.")
    parser.add_argument('--results_dir', metavar='RESULTS_DIR', default=None,
                        help='Instead of storing results in a new subdirectory'
                             ' of /tmp, store results in RESULTS_DIR. If '
                             'RESULTS_DIR already exists, it will be deleted.')
    parser.add_argument('--pretend', action='store_true', default=False,
                        help='Print autoserv commands that would be run, '
                             'rather than running them.')
    parser.add_argument('--no-experimental',
                        action='store_true',
                        default=False,
                        dest='no_experimental',
                        help='DEPRECATED DO NOT USE.')
    parser.add_argument('--enforce-deps', action='store_true',
                        default=False, dest='enforce_deps',
                        help='Skip tests whose DEPENDENCIES can not '
                             'be satisfied.')
    parser.add_argument('--debug', action='store_true',
                        help='Include DEBUG level messages in stdout. Note: '
                             'these messages will be included in the output '
                             'log file regardless. In addition, turn on '
                             'autoserv verbosity.')
    parser.add_argument('--iterations', action='store', type=int, default=1,
                        help='Number of times to run the tests specified.')
    parser.add_argument('--ssh_verbosity', action='store', type=int,
                        choices=[0, 1, 2, 3], default=0,
                        help='Verbosity level for ssh, between 0 and 3 '
                             'inclusive.')
    parser.add_argument('--ssh_options', action='store', default=None,
                        help='A string giving additional options to be '
                             'added to ssh commands.')


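# A minimal usage sketch (assumed caller code, not part of this module):
# test_that and test_droid layer their own flags on top of these shared
# arguments, roughly like this:
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_common_args(parser)
#   options = parser.parse_args(['--fast', '--iterations', '2', 'suite:smoke'])
#   # options.tests == ['suite:smoke'], options.fast_mode is True

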
class LocalSuite(suite.Suite):
    """Subclass of Suite with methods for running locally"""

    def handle_local_result(self, job_id, results_dir, record):
        """
        Handle recording and/or retrying a completed job run locally.

        @param job_id: int ID of job
        @param results_dir: absolute path where test results were stored.
        @param record: callable that records job status

        @returns: new job_id if a job was scheduled for retry, None otherwise.
        """
        logging.debug('Parsing test results for job %s', job_id)
        code = generate_report(results_dir, just_status_code=True)
        if not self._retry_handler:
            return None
        logging.debug('Handling result of job %s', job_id)
        logging.debug(self._retry_handler._retry_map)
        if code == 0:
            logging.debug('All tests for job %s succeeded, no retry', job_id)
            if self._retry_handler.job_present(job_id):
                self._retry_handler.set_attempted(job_id)
            return None

        new_job_id = None
        go_ahead = (self._job_retry and
                    self._retry_handler._should_retry_local_job(job_id))
        if go_ahead:
            new_job_id = self._retry_local_result(job_id, record)
        return new_job_id

    def _retry_local_result(self, job_id, record):
        """
        Retry a test job by id.

        @param job_id: int ID of job
        @param record: callable that records job status.
                 prototype:
                   record(base_job.status_log_entry)

        @returns: new job_id if a job was scheduled for retry, None otherwise.
        """
        test = self._jobs_to_tests[job_id]
        logging.debug('Attempting to retry job %s, test %s', job_id, test.name)
        test.fast = False
        new_job = self._schedule_test(
                record=record, test=test, retry_for=job_id)
        if new_job:
            return new_job.id
        return None

    def test_name_from_job(self, job_id):
        """Find the name of the test run by a job with a given job ID."""
        if self._jobs_to_tests[job_id]:
            return self._jobs_to_tests[job_id].name


def _run_autoserv(command, pretend=False):
    """Run autoserv command.

    Run the autoserv command and wait on it. Log the stdout.
    Ensure that SIGINT signals are passed along to autoserv.

    @param command: the autoserv command to run.
    @param pretend: if True, log the command that would be run instead of
                    running it.
    @returns: exit code of the command.

    """
    if not pretend:
        logging.debug('Running autoserv command: %s', command)
        global _autoserv_proc
        _autoserv_proc = subprocess.Popen(command,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.STDOUT)
        # This incantation forces unbuffered reading from stdout,
        # so that autoserv output can be displayed to the user
        # immediately.
        for message in iter(_autoserv_proc.stdout.readline, b''):
            logging.info('autoserv| %s', message.rstrip().decode('utf-8'))
        _autoserv_proc.wait()
        returncode = _autoserv_proc.returncode
        _autoserv_proc = None
    else:
        logging.info('Pretend mode. Would run autoserv command: %s',
                     command)
        returncode = 0
    return returncode


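# A minimal sketch (hypothetical command): with pretend=True the command is
# only logged and 0 is returned; otherwise autoserv is spawned and its stdout
# is streamed through logging with an 'autoserv|' prefix.
#
#   rc = _run_autoserv(['/usr/local/autotest/server/autoserv', '--help'],
#                      pretend=True)
#   # rc == 0

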
def run_provisioning_job(provision_label, host, info, autotest_path,
                         results_directory, fast_mode,
                         ssh_verbosity=0, ssh_options=None,
                         pretend=False, autoserv_verbose=False):
    """Shell out to autoserv to run a provisioning job.

    @param provision_label: Label to provision the machine to.
    @param host: Hostname of DUT.
    @param info: A host_info.HostInfo for the remote host.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.

    @returns: Absolute path of directory where results were stored.

    """
    # TODO(fdeng): When running against a local DUT, autoserv
    # is still hitting the AFE in the lab.
    # provision_QuickProvision checks the current build of DUT by
    # retrieving build info from AFE. crosbug.com/295178
    results_directory = os.path.join(results_directory, 'results-provision')
    _write_host_info(results_directory, _HOST_INFO_SUBDIR, host, info)
    command = autoserv_utils.autoserv_run_job_command(
            os.path.join(autotest_path, 'server'),
            machines=host, job=None, verbose=autoserv_verbose,
            results_directory=results_directory,
            fast_mode=fast_mode, ssh_verbosity=ssh_verbosity,
            ssh_options=ssh_options,
            extra_args=['--provision', '--job-labels', provision_label],
            no_console_prefix=True,
            host_info_subdir=_HOST_INFO_SUBDIR)
    if _run_autoserv(command, pretend) != 0:
        raise TestThatProvisioningError('Command returned non-zero exit code: '
                                        '%s' % command)
    return results_directory


def run_job(job,
            host,
            info,
            autotest_path,
            results_directory,
            fast_mode,
            id_digits=1,
            ssh_verbosity=0,
            ssh_options=None,
            args=None,
            pretend=False,
            autoserv_verbose=False,
            companion_hosts=None,
            dut_servers=None,
            is_cft=False,
            ch_info=None):
    """
    Shell out to autoserv to run an individual test job.

    @param job: A Job object containing the control file contents and other
                relevant metadata for this test.
    @param host: Hostname of DUT to run test against.
    @param info: a host_info.HostInfo for the remote host.
    @param autotest_path: Absolute path of autotest directory.
    @param results_directory: Absolute path of directory to store results in.
                              (results will be stored in subdirectory of this).
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param id_digits: The minimum number of digits that job ids should be
                      0-padded to when formatting as a string for results
                      directory.
    @param ssh_verbosity: SSH verbosity level, passed along to autoserv_utils
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.
    @param companion_hosts: Companion hosts for the test.
    @param dut_servers: DUT servers for the test.
    @param is_cft: If True, pass the CFT flag through to autoserv.
    @param ch_info: hostinfo for companion hosts.

    @returns: a tuple, return code of the job and absolute path of directory
              where results were stored.
    """
    with tempfile.NamedTemporaryFile() as temp_file:
        temp_file.write(job.control_file.encode())
        temp_file.flush()

        name_tail = job.ctrlname.split('/')[-1]
        results_directory = os.path.join(results_directory,
                                         'results-%0*d-%s' % (id_digits, job.id,
                                                              name_tail))
        # Drop the experimental keyval into the keyval file in the job result
        # folder.
        os.makedirs(results_directory)
        utils.write_keyval(results_directory,
                           {constants.JOB_EXPERIMENTAL_KEY: job.keyvals[
                                   constants.JOB_EXPERIMENTAL_KEY]})
        _write_host_info(results_directory, _HOST_INFO_SUBDIR, host, info)

        if ch_info:
            for chost in companion_hosts.split(" "):
                _write_host_info(results_directory, _HOST_INFO_SUBDIR, chost,
                                 ch_info[chost], False)

        extra_args = [temp_file.name]
        if args:
            extra_args.extend(['--args', args])

        command = autoserv_utils.autoserv_run_job_command(
                os.path.join(autotest_path, 'server'),
                machines=host,
                job=job,
                verbose=autoserv_verbose,
                results_directory=results_directory,
                fast_mode=fast_mode,
                ssh_verbosity=ssh_verbosity,
                ssh_options=ssh_options,
                extra_args=extra_args,
                no_console_prefix=True,
                use_packaging=False,
                host_attributes=info.attributes,
                host_info_subdir=_HOST_INFO_SUBDIR,
                companion_hosts=companion_hosts,
                dut_servers=dut_servers,
                is_cft=is_cft)

        code = _run_autoserv(command, pretend)
        return code, results_directory


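# run_job returns an (exit_code, results_subdirectory) tuple; for example, a
# passing job might yield (0, '<results_directory>/results-1-stub_Pass')
# (hypothetical name).

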
def setup_local_afe():
    """
    Set up a local AFE database and return a direct_afe object to access it.

    @returns: An autotest_lib.frontend.afe.direct_afe instance.
    """
    # This import statement is delayed until now rather than running at
    # module load time, because it kicks off a local sqlite :memory: backed
    # database, and we don't need that unless we are doing a local run.
    from autotest_lib.frontend import setup_django_lite_environment
    from autotest_lib.frontend.afe import direct_afe
    return direct_afe.directAFE()


def get_predicate_for_test_arg(test):
    """
    Gets a suite predicate function for a given command-line argument.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suitematch = re.match(_SUITE_REGEX, test)
    name_pattern_match = re.match(r'e:(.*)', test)
    file_pattern_match = re.match(r'f:(.*)', test)
    if suitematch:
        suitename = suitematch.group(1)
        return (suite.name_in_tag_predicate(suitename),
                'suite named %s' % suitename)
    if name_pattern_match:
        pattern = '^%s$' % name_pattern_match.group(1)
        return (suite.test_name_matches_pattern_predicate(pattern),
                'suite to match name pattern %s' % pattern)
    if file_pattern_match:
        pattern = '^%s$' % file_pattern_match.group(1)
        return (suite.test_file_matches_pattern_predicate(pattern),
                'suite to match file name pattern %s' % pattern)
    return (suite.test_name_equals_predicate(test),
            'job named %s' % test)


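# For example:
#   'suite:smoke'              -> tests tagged as members of the smoke suite
#   'e:login_.*'               -> test names matching '^login_.*$'
#   'f:.*stub_Pass.*'          -> control file names matching '^.*stub_Pass.*$'
#   'login_CryptohomeMounted'  -> the test with exactly that name

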
def get_predicate_for_possible_test_arg(test):
    """
    Gets a suite predicate function to calculate the similarity of the given
    test and possible tests.

    @param test: String. An individual TEST command line argument, e.g.
                         'login_CryptohomeMounted' or 'suite:smoke'
    @returns: A (predicate, string) tuple with the necessary suite
              predicate, and a description string of the suite that
              this predicate will produce.
    """
    suitematch = re.match(_SUITE_REGEX, test)
    name_pattern_match = re.match(r'e:(.*)', test)
    file_pattern_match = re.match(r'f:(.*)', test)
    if suitematch:
        suitename = suitematch.group(1)
        return (suite.name_in_tag_similarity_predicate(suitename),
                'suite name similar to %s' % suitename)
    if name_pattern_match:
        pattern = '^%s$' % name_pattern_match.group(1)
        return (suite.test_name_similarity_predicate(pattern),
                'job name similar to %s' % pattern)
    if file_pattern_match:
        pattern = '^%s$' % file_pattern_match.group(1)
        return (suite.test_file_similarity_predicate(pattern),
                'suite to match file name similar to %s' % pattern)
    return (suite.test_name_similarity_predicate(test),
            'job name similar to %s' % test)


def add_ssh_identity(temp_directory, ssh_private_key=TEST_KEY_PATH):
    """Add an ssh identity to the agent.

    TODO (sbasi) b/26186193: Add support for test_droid and make TEST_KEY_PATH
    not ChromeOS specific.

    @param temp_directory: A directory to copy the |private key| into.
    @param ssh_private_key: Path to the ssh private key to use for testing.
    """
    # Add the testing key to the current ssh agent.
    if 'SSH_AGENT_PID' in os.environ:
        # Copy the testing key to the temp directory and make it NOT
        # world-readable. Otherwise, ssh-add complains.
        shutil.copy(ssh_private_key, temp_directory)
        key_copy_path = os.path.join(temp_directory,
                                     os.path.basename(ssh_private_key))
        os.chmod(key_copy_path, stat.S_IRUSR | stat.S_IWUSR)
        p = subprocess.Popen(['ssh-add', key_copy_path],
                             stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        p_out, _ = p.communicate()
        for line in p_out.splitlines():
            logging.info(line)
    else:
        logging.warning('There appears to be no running ssh-agent. Attempting '
                        'to continue without running ssh-add, but ssh commands '
                        'may fail.')


def _auto_detect_labels(remote):
    """Automatically detect host labels and return them.

    Note that the board label will not be auto-detected.

    @param remote: The hostname of the remote device.

    @returns: the detected labels as a list of strings.
    """
    cros_host = factory.create_host(remote)
    labels_to_create = [label for label in cros_host.get_labels()
                        if not label.startswith(constants.BOARD_PREFIX)]
    return labels_to_create


def get_all_control_files(test, autotest_path):
    """Get all control files for the specified test in the given autotest_path.

    @param test: name of the test or suite to fetch.
    @param autotest_path: Absolute path of autotest installed in sysroot.
    """
    (predicate, description) = get_predicate_for_test_arg(test)
    logging.info('Fetching suite for %s...', description)
    return get_control_files(autotest_path=autotest_path, pred=predicate)


def get_possible_tests(test, autotest_path):
    """Find tests similar to the given test and exit with a helpful error.

    @param test: name of the test or suite that did not match any control
                 files.
    @param autotest_path: Absolute path of autotest installed in sysroot.

    @raises SystemExit: always, with a message listing possible matches.
    """
    fs_getter = suite.create_fs_getter(autotest_path)

    (similarity_predicate,
     similarity_description) = (get_predicate_for_possible_test_arg(test))

    logging.error('No test found, searching for possible tests with %s',
                  similarity_description)
    possible_tests = suite.find_possible_tests(fs_getter, similarity_predicate)
    raise SystemExit('Found no tests. Check your suite name, test name, '
                     'or test matching wildcard.\nDid you mean any of the '
                     'following tests?\n  %s' % '\n  '.join(possible_tests))


def perform_local_run(autotest_path,
                      tests,
                      remote,
                      fast_mode,
                      build=NO_BUILD,
                      board=NO_BOARD,
                      model=NO_MODEL,
                      args=None,
                      pretend=False,
                      ignore_deps=True,
                      results_directory=None,
                      ssh_verbosity=0,
                      ssh_options=None,
                      autoserv_verbose=False,
                      iterations=1,
                      host_attributes={},
                      job_retry=True,
                      companion_hosts=None,
                      minus=[],
                      dut_servers=None,
                      is_cft=False,
                      host_labels=None,
                      label=None):
    """Perform local run of tests.

    This method enforces satisfaction of test dependencies for tests that are
    run as a part of a suite.

    @param autotest_path: Absolute path of autotest installed in sysroot or
                          custom autotest path set by --autotest_dir.
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param model: String specifying model for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param autoserv_verbose: If true, pass the --verbose flag to autoserv.
    @param iterations: int number of times to schedule tests.
    @param host_attributes: Dict of host attributes to pass into autoserv.
    @param job_retry: If False, tests will not be retried at all.
    @param companion_hosts: companion hosts for the test.
    @param minus: List of test/suite strings whose control files should be
                  excluded from the run.
    @param dut_servers: dut servers for the test.
    @param is_cft: If True, pass the CFT flag through to autoserv.
    @param host_labels: Whitespace separated string of host labels to use
                        instead of the defaults (skips label auto-detection).
    @param label: Optional label to use for the jobname. Will be appended to
        the keyval file via server_job.

    @returns: A list of return codes, one for each job that has run; or [1]
              if provisioning failed prior to running any jobs.
    """
    args = _set_default_servo_args(args)

    # The version doesn't really matter for local runs.
    if not host_labels:
        host_labels = [
                u'cros-version:ad_hoc_build',
                u'board:%s' % board,
                u'model:%s' % model
        ]
        if not ignore_deps:
            logging.info('Auto-detecting labels for %s', remote)
            # Auto-detected labels may duplicate explicitly set ones.
            host_labels += list(set(_auto_detect_labels(remote)))

    else:
        host_labels = host_labels.split(" ")
    info = host_info.HostInfo(host_labels, host_attributes)

    # If using test_that, there needs to be a hostinfo file (even if blank)
    # for each host (including companions).
    # TODO: Determine if we want to auto-detect labels, and/or expose
    # CLI options for them (which might be required in CFT)
    ch_info = {}
    if companion_hosts:
        for chost in companion_hosts.split(" "):
            chost_labels = []
            if not ignore_deps:
                logging.info('Auto-detecting labels for %s', chost)
                # Auto-detected labels may duplicate explicitly set ones.
                chost_labels += list(set(_auto_detect_labels(chost)))
            ch_info[chost] = host_info.HostInfo(chost_labels, {})

    job_queue = []
    test_num = 0

    m_queue = []
    for m in minus:
        ctrl_files = get_all_control_files(m, autotest_path)
        for ctrl in ctrl_files:
            m_queue.append(ctrl)

    if iterations > 1:
        logging.info("Scheduling for %s iterations", iterations)
    for _ in range(iterations):
        for test in tests:
            ctrl_files = get_all_control_files(test, autotest_path)
            if len(ctrl_files) == 0:
                get_possible_tests(test, autotest_path)
            for control in ctrl_files:
                if any([control.name == no_run.name for no_run in m_queue]):
                    continue
                test_num += 1
                if label:
                    name = label
                else:
                    name = "adhoc/{}".format(control.name)
                job = SimpleJob(name=name,
                                owner='autotest_system',
                                test_num=test_num,
                                ctrlname=control.name)
                job.set_control_file(control)
                if ignore_deps:
                    job_queue.append(job)
                elif job.deps_satisfied(host_labels):
                    job_queue.append(job)
    _set_pyversion(job_queue)
    codes = []
    job_id_digits = 0
    for job in job_queue:
        logging.info('%s jobs in job queue', len(job_queue))
        # Could also use math.log10, but it's not worth it for a single
        # conversion.
        job_id_digits = len(str(job.id))
        logging.debug('Running job %s of test %s', job.id, (job.name))
        code, abs_dir = run_job(job=job,
                                host=remote,
                                info=info,
                                autotest_path=autotest_path,
                                results_directory=results_directory,
                                fast_mode=fast_mode,
                                id_digits=job_id_digits,
                                ssh_verbosity=ssh_verbosity,
                                ssh_options=ssh_options,
                                args=args,
                                pretend=pretend,
                                autoserv_verbose=autoserv_verbose,
                                companion_hosts=companion_hosts,
                                dut_servers=dut_servers,
                                is_cft=is_cft,
                                ch_info=ch_info)
        codes.append(code)
        logging.debug("Code: %s, Results in %s", code, abs_dir)

    return codes


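# A minimal invocation sketch (hypothetical values; callers normally reach
# this via perform_run_from_autotest_root below):
#
#   codes = perform_local_run('/usr/local/autotest', ['suite:smoke'],
#                             'chromeos-dut.local', fast_mode=True,
#                             results_directory='/tmp/my_results')
#   # codes holds one autoserv exit code per scheduled job.

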
def _set_default_servo_args(args):
    """Add default servo arguments for backward compatibility.

    See crbug.com/881006 for context.  Some servo related defaults were baked
    into the autotest ServoHost code. These have now been deleted. A side effect
    was that users of test_that relied on these defaults for some tests to work
    magically in the chroot environment.

    Current plan is to add back these defaults to test_that invocations for
    backwards compatibility of these use cases. There is no planned removal date
    for this hack.

    @returns: The modified args string.
    """
    # args is a str with whitespace separated key=value arguments.
    # Avoid parsing args here (to avoid adding another implicit constraint on
    # the exact args format) by adding defaults only in the obvious cases where
    # relevant keys are entirely missing.
    if args is None:
        args = ''
    if 'servo_host' not in args:
        args += ' servo_host=localhost'
    if 'servo_port' not in args:
        args += ' servo_port=9999'
    return args


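# For example:
#   _set_default_servo_args(None)
#       -> ' servo_host=localhost servo_port=9999'
#   _set_default_servo_args('servo_host=labstation1')
#       -> 'servo_host=labstation1 servo_port=9999'

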
def sigint_handler(signum, stack_frame):
    #pylint: disable-msg=C0111
    """Handle SIGINT or SIGTERM to a local test_that run.

    This handler sends a SIGINT to the running autoserv process,
    if one is running, giving it up to 5 seconds to clean up and exit. After
    the timeout elapses, autoserv is killed. In either case, once autoserv
    exits, this process exits with status 1.
    """
    # If multiple signals arrive before the handler is unset, ignore the
    # duplicates.
    if not _sigint_handler_lock.acquire(False):
        return
    try:
        # Ignore future signals by unsetting handler.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)

        logging.warning('Received SIGINT or SIGTERM. Cleaning up and exiting.')
        if _autoserv_proc:
            logging.warning('Sending SIGINT to autoserv process. Waiting up '
                            'to %s seconds for cleanup.',
                            _AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            _autoserv_proc.send_signal(signal.SIGINT)
            timed_out, _ = retry.timeout(_autoserv_proc.wait,
                    timeout_sec=_AUTOSERV_SIGINT_TIMEOUT_SECONDS)
            if timed_out:
                _autoserv_proc.kill()
                logging.warning('Timed out waiting for autoserv to handle '
                                'SIGINT. Killed autoserv.')
    finally:
        # Releasing the lock is not strictly necessary, since we exit below.
        _sigint_handler_lock.release()
        sys.exit(1)


def create_results_directory(results_directory=None, board_name=None):
    """Create a results directory.

    If no directory is specified this method will create and return a
    temp directory to hold results. If a directory name is specified, any
    existing directory at that path is deleted and then recreated.

    @param results_directory: The path to the results_directory to create.
    @param board_name: Optional board name, used in the name of the temp
                       directory when one is created.

    @return results_directory: A path to the results_directory, ready for use.
    """
    if results_directory is None:
        # Create a results_directory as subdir of /tmp
        dirname_prefix = 'test_that_results_'
        if board_name is not None:
            dirname_prefix += (board_name + '_')
        results_directory = tempfile.mkdtemp(prefix=dirname_prefix)
    else:
        # Delete results_directory if it already exists.
        try:
            shutil.rmtree(results_directory)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        # Create results_directory if it does not exist
        try:
            os.makedirs(results_directory)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    return results_directory


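# For example (hypothetical paths; mkdtemp appends a random suffix):
#   create_results_directory()                  -> '/tmp/test_that_results_XXXXXX'
#   create_results_directory(board_name='eve')  -> '/tmp/test_that_results_eve_XXXXXX'
#   create_results_directory('/tmp/my_results') -> '/tmp/my_results' (recreated empty)

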
def generate_report(directory,
                    allow_chrome_crashes=False,
                    just_status_code=False,
                    html_report=False,
                    is_cft=False):
    """Parse the test result files in the given directory into a report.

    @param directory: string, the absolute path of the directory to look in
    @param allow_chrome_crashes: boolean, ignore Chrome crashes in the
    report. Default: False, report Chrome crashes.
    @param just_status_code: boolean, skip the report and only parse the files
    to determine whether there were failures. Default: False, generate report.
    @param html_report: boolean, also generate an HTML report in the given
    directory. Default: False.
    @param is_cft: boolean, pass the --cft flag to the report generator.
    Default: False.

    @returns: int, the status code returned by the report generator (non-zero
    indicates failures).
    """
    test_report_command = [os.path.join(os.path.dirname(__file__),
                                        'generate_test_report')]
    # Experimental test results do not influence the exit code.
    test_report_command.append('--ignore_experimental_tests')
    if is_cft:
        test_report_command.append('--cft')
    if html_report:
        test_report_command.append('--html')
        test_report_command.append('--html-report-dir=%s' % directory)
    if allow_chrome_crashes:
        test_report_command.append('--allow_chrome_crashes')
    if just_status_code:
        test_report_command.append('--just_status_code')
    test_report_command.append(directory)
    status_code = subprocess.call(test_report_command)
    if not just_status_code:
        with open(os.path.join(directory, 'test_report.log'),
                  'w') as report_log:
            subprocess.call(test_report_command, stdout=report_log)
    return status_code


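# For example, parsing results and also writing an HTML report (hypothetical
# directory):
#
#   status = generate_report('/tmp/test_that_results_1234', html_report=True)
#   # status is non-zero if any non-experimental test failed; a text report
#   # is also written to <directory>/test_report.log.

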
def perform_run_from_autotest_root(autotest_path,
                                   argv,
                                   tests,
                                   remote,
                                   build=NO_BUILD,
                                   board=NO_BOARD,
                                   model=NO_MODEL,
                                   args=None,
                                   pretend=False,
                                   ignore_deps=True,
                                   results_directory=None,
                                   ssh_verbosity=0,
                                   ssh_options=None,
                                   iterations=1,
                                   fast_mode=False,
                                   debug=False,
                                   allow_chrome_crashes=False,
                                   host_attributes={},
                                   job_retry=True,
                                   companion_hosts=None,
                                   minus=[],
                                   dut_servers=None,
                                   is_cft=False,
                                   host_labels=None,
                                   label=None):
    """
    Perform a test_that run, from the |autotest_path|.

    This function is to be called from test_that/test_droid's main() script,
    when tests are executed from the |autotest_path|. It handles all stages
    of a test run that come after the bootstrap into |autotest_path|.

    @param autotest_path: Full absolute path to the autotest root directory.
    @param argv: The arguments list, as passed to main(...)
    @param tests: List of strings naming tests and suites to run. Suite strings
                  should be formed like "suite:smoke".
    @param remote: Remote hostname.
    @param build: String specifying build for local run.
    @param board: String specifying board for local run.
    @param model: String specifying model for local run.
    @param args: String that should be passed as args parameter to autoserv,
                 and then ultimately to the test itself.
    @param pretend: If True, will print out autoserv commands rather than
                    running them.
    @param ignore_deps: If True, test dependencies will be ignored.
    @param results_directory: Directory to store results in. Defaults to None,
                              in which case results will be stored in a new
                              subdirectory of /tmp
    @param ssh_verbosity: SSH verbosity level, passed through to
                          autoserv_utils.
    @param ssh_options: Additional ssh options to be passed to autoserv_utils
    @param iterations: int number of times to schedule tests.
    @param fast_mode: bool to use fast mode (disables slow autotest features).
    @param debug: Logging and autoserv verbosity.
    @param allow_chrome_crashes: If True, allow chrome crashes.
    @param host_attributes: Dict of host attributes to pass into autoserv.
    @param job_retry: If False, tests will not be retried at all.
    @param companion_hosts: companion hosts for the test.
    @param minus: List of test/suite strings whose control files should be
                  excluded from the run.
    @param dut_servers: dut servers for the test.
    @param is_cft: If True, pass the CFT flag through to autoserv.
    @param host_labels: Whitespace separated string of host labels to use
                        instead of the defaults (skips label auto-detection).
    @param label: Optional label to use for the jobname. Will be appended to
        the keyval file via server_job.

    @return: A return code that test_that should exit with.
    """
    if results_directory is None or not os.path.exists(results_directory):
        raise ValueError('Expected valid results directory, got %s' %
                         results_directory)

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=results_directory,
            use_console=True,
            verbose=debug,
            debug_log_name='test_that')
    logging.info('Began logging to %s', results_directory)

    logging.debug('test_that command line was: %s', argv)

    signal.signal(signal.SIGINT, sigint_handler)
    signal.signal(signal.SIGTERM, sigint_handler)

    codes = perform_local_run(autotest_path,
                              tests,
                              remote,
                              fast_mode,
                              build,
                              board,
                              model,
                              args=args,
                              pretend=pretend,
                              ignore_deps=ignore_deps,
                              results_directory=results_directory,
                              ssh_verbosity=ssh_verbosity,
                              ssh_options=ssh_options,
                              autoserv_verbose=debug,
                              iterations=iterations,
                              host_attributes=host_attributes,
                              job_retry=job_retry,
                              companion_hosts=companion_hosts,
                              minus=minus,
                              dut_servers=dut_servers,
                              is_cft=is_cft,
                              host_labels=host_labels,
                              label=label)
    if pretend:
        logging.info('Finished pretend run. Exiting.')
        return 0

    final_result = generate_report(results_directory,
                                   allow_chrome_crashes=allow_chrome_crashes,
                                   html_report=True,
                                   is_cft=is_cft)
    try:
        os.unlink(_LATEST_RESULTS_DIRECTORY)
    except OSError:
        pass
    link_target = os.path.relpath(results_directory,
                                  os.path.dirname(_LATEST_RESULTS_DIRECTORY))
    if any(codes):
        logging.error('Autoserv encountered unexpected errors '
                      'when executing jobs.')
        final_result = final_result or 1
    os.symlink(link_target, _LATEST_RESULTS_DIRECTORY)
    logging.info('Finished running tests. Results can be found in %s or %s',
                 results_directory, _LATEST_RESULTS_DIRECTORY)
    return final_result


def _write_host_info(results_dir,
                     host_info_subdir,
                     hostname,
                     info,
                     new_dir=True):
    """Write HostInfo to a FileStore to be used by autoserv.

    @param results_dir: Path to the results directory.
    @param host_info_subdir: Subdirectory of results directory for host info.
    @param hostname: Hostname passed into autoserv.
    @param info: hosts.HostInfo to write.
    @param new_dir: If True, create the host info subdirectory (it must not
                    already exist).
    """
    d = os.path.join(results_dir, host_info_subdir)
    if new_dir:
        os.makedirs(d)
    store = file_store.FileStore(os.path.join(d, '%s.store' % hostname))
    store.commit(info)


class SimpleJob(object):
    """
    A simple job for running autotests without an AFE.

    The goal here is to remove the dependencies on frontend/afe and the
    libraries it pulls in. Autotests will be run via 2 methods going forward:
    the Skylab world, and test_that. Skylab invokes autoserv directly,
    bypassing all of this. test_that is a CLI, not a UI, and should be split
    free of the AFE libs.
    """

    def __init__(self,
                 owner,
                 name,
                 control_type='client',
                 test_num=1,
                 ctrlname=None):
        self.owner = owner
        self.name = name
        self.control_type = control_type
        self.id = test_num
        self.keyvals = {'experimental': False}
        self.dependencies = []
        self.py_version = None
        self.ctrlname = ctrlname

    def set_control_file(self, control):
        """Set this job's control file contents and related metadata."""
        self.control_file = control.text
        self.control_type = control.test_type.capitalize()
        if hasattr(control, 'dependencies'):
            self.dependencies = set(control.dependencies)
        if control.py_version and control.py_version not in (2, 3):
            raise TestThatControlError(
                    "Test py_version not compatible. Expected 2 or 3, got %s" %
                    control.py_version)
        self.py_version = control.py_version

    def deps_satisfied(self, labels):
        """Verify the deps for this job are satisfied by the given labels."""
        return self.dependencies.issubset(labels)


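# A minimal sketch of how perform_local_run uses SimpleJob (the control file
# object here is hypothetical):
#
#   job = SimpleJob(owner='autotest_system', name='adhoc/stub_Pass',
#                   test_num=1, ctrlname='stub_Pass')
#   job.set_control_file(control)  # a parsed control file object
#   if job.deps_satisfied(['board:eve', 'servo']):
#       pass  # the job can be scheduled on this host

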
def _set_pyversion(tests):
    """Set PY_VERSION in the environment if the suite is homogeneous.

    If every queued test declares py_version 2 (or every test declares 3),
    export that version via the PY_VERSION environment variable; otherwise
    leave the environment untouched. Mixing different versions in the same
    suite is *not* supported.
    """
    set2 = all(v.py_version == 2 for v in tests)
    set3 = all(v.py_version == 3 for v in tests)
    if not set2 and not set3:
        return
    if set2:
        os.environ['PY_VERSION'] = "2"
    elif set3:
        os.environ['PY_VERSION'] = "3"


def get_control_files(autotest_path, pred):
    """Return a list of parsed control files matching the given predicate."""
    cf_getter = suite.create_fs_getter(autotest_path)
    return list(suite.find_and_parse_tests(cf_getter, pred))