xref: /aosp_15_r20/external/autotest/client/common_lib/test.py (revision 9c5db1993ded3edbeafc8092d69fe5de2ee02df7)
# Lint as: python2, python3
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

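# A typical client-side test built on this class subclasses it (usually via
# autotest_lib.client.bin.test.test) and implements run_once(). The sketch
# below is illustrative only; the class name and log message are made up:
#
#     from autotest_lib.client.bin import test
#
#     class dummy_PassTest(test.test):
#         """A minimal test that always passes."""
#         version = 1
#
#         def run_once(self):
#             logging.info('dummy_PassTest iteration %d', self.iteration)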
#pylint: disable=C0111

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import fcntl
import json
import logging
import os
import re
import shutil
import six
from six.moves import map
from six.moves import range
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

try:
    from autotest_lib.utils.frozen_chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock


class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the CrOS crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        # The crash_reporter uses this file to determine which test is in
        # progress.
        self.test_in_prog_file = '/run/crash_reporter/test-in-prog'
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False

        # Flag to indicate if test results should be gathered when the test
        # passes.
        self.collect_full_logs = False

    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in six.iteritems(dictionary):
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict

    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False,
                          resultsdir=None):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will be subsequently displayed on the chrome perf
                dashboard. This allows multiple metrics to be grouped together
                on the same graphs. Defaults to None, indicating that the perf
                value should be displayed individually on a separate graph.
        @param replacement: string to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
        @param resultsdir: An optional path to specify a custom output
                directory.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        if not resultsdir:
            resultsdir = self.resultsdir
        if not os.path.exists(resultsdir):
            os.makedirs(resultsdir)
        output_file = os.path.join(resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numeric, but at times strings representing
        # numbers are logged; attempt to convert them to numbers. If a
        # non-numeric string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = list(map(float, value))
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a single
        # value and a list of values. Lists of values get extra processing in
        # the chromeperf dashboard (mean, standard deviation, etc.).
        # Tests can log one or more values for the same metric. To adhere
        # strictly to the specification, the first value logged is a scalar,
        # but if another value is logged the results become a list of scalars.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalars even if there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'], value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))


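    # Example for output_perf_value() above (illustrative only; the metric
    # name, value and units are made up): a test that measures page load time
    # in run_once() might record it with
    #
    #     self.output_perf_value(description='page_load_time', value=2.7,
    #                            units='sec', higher_is_better=False)
    #
    # which adds a 'page_load_time' entry to results-chart.json in this test's
    # results directory for the TKO parser / perf dashboard to consume.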
    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print("", file=open(keyval_path, "a"))


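    # Example for the keyval writers above (illustrative; the key names are
    # made up): an iteration that records one perf number and one attribute
    # might call
    #
    #     self.write_perf_keyval({'throughput_mbps': 94.2})
    #     self.write_attr_keyval({'kernel_version': '5.4'})
    #
    # Each call appends type-tagged entries such as
    # "throughput_mbps{perf}=94.2" and "kernel_version{attr}=5.4" to
    # <resultsdir>/keyval.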
    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)


    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most-
        recently registered, in stack fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)


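    # Example for the iteration-hook registration methods above (illustrative;
    # the hook below is hypothetical): a test can log system state around every
    # iteration by registering hooks, e.g. from its initialize():
    #
    #     def _log_meminfo(test_obj):
    #         with open('/proc/meminfo') as f:
    #             logging.debug('meminfo:\n%s', f.read())
    #
    #     self.register_before_iteration_hook(_log_meminfo)
    #     self.register_after_iteration_hook(_log_meminfo)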
    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False

        # Mark the current test in progress so that crash_reporter can report
        # it in uploaded crashes.
        # if the file already exists, truncate and overwrite.
        # TODO(mutexlox): Determine what to do if the file already exists, which
        # could happen for a few reasons:
        #   * An earlier tast or autotest run crashed before removing the
        #     test-in-prog file. In this case, we would ideally overwrite the
        #     existing test name and _not_ restore it.
        #   * An autotest ran another autotest (e.g. logging_GenerateCrashFiles
        #     runs desktopui_SimpleLogin). In this case, arguably it makes sense
        #     to attribute the crash to logging_GenerateCrashFiles and not to
        #     desktopui_SimpleLogin (or to attribute it to both), since the
        #     context in which it's running is different than if it were run on
        #     its own.
        #   * Every tast test is kicked off by the 'tast' autotest (see
        #     server/site_tests/tast/tast.py), so the file will always be
        #     populated when the tast suite starts running. In this case, we
        #     want to attribute crashes that happen during a specific test to
        #     that test, but if the tast infra causes a crash we should
        #     attribute the crash to it (e.g. to the 'tast.critical-system'
        #     "autotest").  For this case, we should save the contents of the
        #     file before a test and restore it after.
        if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                dargs['host'].is_up_fast()):
            dargs['host'].run('echo %s > %s' %
                              (self.tagged_testname, self.test_in_prog_file),
                              ignore_status=True)
        else:
            crash_run_dir = os.path.dirname(self.test_in_prog_file)
            try:
                # Only try to create the file if the directory already exists
                # (otherwise, we may not be on a CrOS device)
                if os.path.exists(crash_run_dir):
                    with open(self.test_in_prog_file, 'w') as f:
                        f.write(self.tagged_testname)
            except:  # Broad 'except' because we don't want this to block tests
                logging.warning('failed to write in progress test name')
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\nargs: %r\nkwargs: %r', args, dargs)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                    dargs['host'].is_up_fast()):
                dargs['host'].run('rm -f %s' % self.test_in_prog_file)
            else:
                try:
                    # Unmark the test as running.
                    os.remove(self.test_in_prog_file)
                except OSError as e:
                    # If something removed it, do nothing--we're in the desired
                    # state (the file is gone). Otherwise, log.
                    if e.errno != errno.ENOENT:
                        logging.warning(
                                "Couldn't remove test-in-prog file: %s",
                                traceback.format_exc())

            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function so it can cooperate with the profiling
        infrastructure. For other tests, you can just override the default
        implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function as many times as needed to cover the minimum
            test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            silently ignored if you specify both.

        @param profile_only: If true run X iterations with profilers enabled.
            If false run X iterations and one with profiling if profiles are
            enabled. If None, default to the value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in range(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()


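    # Example for execute() above (illustrative; the test name and constraint
    # are made up): keyword arguments passed to job.run_test() in a control
    # file reach execute(), so a caller could request three iterations with a
    # perf constraint:
    #
    #     job.run_test('hardware_StorageFio', iterations=3,
    #                  constraints=['seq_write_mb_per_sec > 100'])
    #
    # Each constraint string is evaluated against that iteration's perf keyvals
    # in analyze_perf_constraints(); any unmet constraint fails the test.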
    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # The run_once() argument takes priority over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it, will be called before any run_once()
        call including the profiling run (when it's called before starting
        the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in six.iteritems(dargs):
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                utils.cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = utils.cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(utils.cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = utils.cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in utils.get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = utils.cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it so the actual time of error is known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        six.reraise(exc_info[0], exc_info[1], exc_info[2])
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception as e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: URL of the new test.
        @param tag: Tag added to the test name.
        @param args: Args for the subtest.
        @param dargs: Dictionary with args for the subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true, execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, main_testpath=test_basepath,
                                 *args, **dargs)


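# Example for base_test.runsubtest() above (illustrative; the subtest name and
# tag are made up): a parent test's run_once() can launch another test with
#
#     self.runsubtest('dummy_Pass', tag='nested')
#
# which forwards to job.run_test() with main_testpath set so the subtest's
# results are nested under the parent test's output directory.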
def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.__code__.co_flags
        all_varnames += func.__code__.co_varnames[:func.__code__.co_argcount]

    # Check if given args belong to at least one of the methods below.
    if len(args) > 0:
        # Current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check if given dargs belong to at least one of the methods below.
    if len(dargs) > 0:
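        # 0x08 is CPython's CO_VARKEYWORDS code-object flag; it is set when a
        # function is defined with a **kwargs parameter.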
        if not all_co_flags & 0x08:
            # no func accepts *dargs, so:
            for param in dargs:
                if not param in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing, create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = open(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url = os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception as e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job,
            url,
            tag,
            args,
            dargs,
            local_namespace={},
            global_namespace={},
            before_test_hook=None,
            after_test_hook=None,
            before_iteration_hook=None,
            after_iteration_hook=None,
            override_test_in_prog_file=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('main_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        if hasattr(job, 'force_full_log_collection'):
            mytest.force_full_log_collection = job.force_full_log_collection
        if override_test_in_prog_file:
            mytest.test_in_prog_file = override_test_in_prog_file
        mytest.success = False
        if before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # We use the register_before/after_iteration_hook methods to register
        # the passed-in hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook:
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
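
# Example for runtest() above (illustrative; the test name and arguments are a
# sketch): runtest() is normally invoked by the job harness rather than by test
# code, typically via a control file call such as
#
#     job.run_test('dummy_PassTest', tag='smoke', iterations=2)
#
# which resolves the test module, instantiates the test class and drives
# initialize()/setup()/execute()/cleanup() through mytest._exec() above.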
930