# Lint as: python2, python3
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialize
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

#pylint: disable=C0111

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import errno
import fcntl
import json
import logging
import os
import re
import shutil
import six
from six.moves import map
from six.moves import range
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

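# Use the chromite metrics library when it is available; otherwise fall back
# to the metrics mock so this module still imports in environments without
# frozen_chromite.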
try:
    from autotest_lib.utils.frozen_chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock


class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the CrOS crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        # The crash_reporter uses this file to determine which test is in
        # progress.
        self.test_in_prog_file = '/run/crash_reporter/test-in-prog'
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False

        # Flag to indicate if test results should be gathered when the test
        # passes.
        self.collect_full_logs = False

    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
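        # e.g. _append_type_to_keys({'latency': 10}, 'perf')
        #     returns {'latency{perf}': 10}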
        new_dict = {}
        for key, value in six.iteritems(dictionary):
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict

    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False,
                          resultsdir=None):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will subsequently be displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
        @param resultsdir: An optional path specifying a custom output
                directory.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal, replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        if not resultsdir:
            resultsdir = self.resultsdir
        if not os.path.exists(resultsdir):
            os.makedirs(resultsdir)
        output_file = os.path.join(resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be a number, but at times strings representing
        # numbers are logged; attempt to convert them to numbers. If a
        # non-number string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = list(map(float, value))
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a
        # single value and a list of values. Lists of values get extra
        # processing in the chromeperf dashboard (mean, standard deviation,
        # etc.). Tests can log one or more values for the same metric; to
        # adhere strictly to the specification, the first value logged is a
        # scalar, but if another value is logged the results become a list of
        # scalars.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalars, even when there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'],
                                value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))

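    # Illustrative usage sketch (not from the original source; the metric
    # names and values are hypothetical):
    #
    #     self.output_perf_value('page_load_time', 512.3, units='msec',
    #                            higher_is_better=False)
    #     self.output_perf_value('scrolling_fps', [58.1, 59.7, 60.2],
    #                            units='fps', higher_is_better=True,
    #                            graph='smoothness')
    #
    # The first call writes a 'scalar' entry under 'page_load_time/summary';
    # the second writes a 'list_of_scalar_values' entry grouped under the
    # 'smoothness' graph in results-chart.json.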

    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr': attr_dict, 'perf': perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        # Append a blank separator line; use a context manager so the file
        # handle is closed rather than leaked.
        with open(keyval_path, "a") as keyval_file:
            print("", file=keyval_file)

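    # Illustrative sketch (hypothetical keys and values): a test iteration
    # might record results as
    #
    #     self.write_perf_keyval({'throughput_mb_per_sec': 42.5})
    #     self.write_attr_keyval({'kernel_version': '5.4'})
    #
    # which records 'throughput_mb_per_sec{perf}' and 'kernel_version{attr}'
    # entries in the per-iteration keyval file.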

    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)

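    # Illustrative sketch (hypothetical constraint): constraints are strings
    # evaluated against the latest iteration's perf keyvals, e.g.
    #
    #     self.write_perf_keyval({'throughput': 120})
    #     self.analyze_perf_constraints(['throughput > 100'])
    #
    # A constraint that evaluates falsy, or that cannot be evaluated, is
    # recorded and later raised as a TestFail by process_failed_constraints().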

    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most
        recently registered, in stack fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)

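    # Illustrative sketch (hypothetical hook): a hook is any callable that
    # takes the test object, e.g.
    #
    #     def _log_iteration(test):
    #         logging.info('finished iteration %d of %s',
    #                      test.iteration, test.tagged_testname)
    #
    #     self.register_after_iteration_hook(_log_iteration)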

    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False

        # Mark the current test in progress so that crash_reporter can report
        # it in uploaded crashes.
        # If the file already exists, truncate and overwrite it.
        # TODO(mutexlox): Determine what to do if the file already exists, which
        # could happen for a few reasons:
        #   * An earlier tast or autotest run crashed before removing the
        #     test-in-prog file. In this case, we would ideally overwrite the
        #     existing test name and _not_ restore it.
        #   * An autotest ran another autotest (e.g. logging_GenerateCrashFiles
        #     runs desktopui_SimpleLogin). In this case, arguably it makes sense
        #     to attribute the crash to logging_GenerateCrashFiles and not to
        #     desktopui_SimpleLogin (or to attribute it to both), since the
        #     context in which it's running is different than if it were run on
        #     its own.
        #   * Every tast test is kicked off by the 'tast' autotest (see
        #     server/site_tests/tast/tast.py), so the file will always be
        #     populated when the tast suite starts running. In this case, we
        #     want to attribute crashes that happen during a specific test to
        #     that test, but if the tast infra causes a crash we should
        #     attribute the crash to it (e.g. to the 'tast.critical-system'
        #     "autotest").  For this case, we should save the contents of the
        #     file before a test and restore it after.
        if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                dargs['host'].is_up_fast()):
            dargs['host'].run('echo %s > %s' %
                              (self.tagged_testname, self.test_in_prog_file),
                              ignore_status=True)
        else:
            crash_run_dir = os.path.dirname(self.test_in_prog_file)
            try:
                # Only try to create the file if the directory already exists
                # (otherwise, we may not be on a CrOS device).
                if os.path.exists(crash_run_dir):
                    with open(self.test_in_prog_file, 'w') as f:
                        f.write(self.tagged_testname)
            except:  # Broad 'except' because we don't want this to block tests
                logging.warning('failed to write in-progress test name')
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\nargs: %r\nkwargs: %r', args, dargs)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if ('host' in dargs and hasattr(dargs['host'], 'is_up_fast') and
                    dargs['host'].is_up_fast()):
                dargs['host'].run('rm -f %s' % self.test_in_prog_file)
            else:
                try:
                    # Unmark the test as running.
                    os.remove(self.test_in_prog_file)
                except OSError as e:
                    # If something removed it, do nothing--we're in the desired
                    # state (the file is gone). Otherwise, log.
                    if e.errno != errno.ENOENT:
                        logging.warning(
                                "Couldn't remove test-in-prog file: %s",
                                traceback.format_exc())

            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            ignored (with a debug log message) if you specify both.

        @param profile_only: If True, run X iterations with profilers enabled.
            If False, run X iterations and one more with profiling if profilers
            are present. If None, default to the value of
            job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in range(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc.
        self.postprocess()
        self.process_failed_constraints()

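    # Illustrative sketch (hypothetical test name): execute() is normally
    # reached via job.run_test(), so a control file might request, e.g.,
    #
    #     job.run_test('sleeptest', iterations=3)
    #     job.run_test('sleeptest', test_length=60)  # at least 60 s of runs
    #
    # If both are given, test_length takes precedence and iterations is
    # ignored.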

    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Give priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it. This will be called before every
        run_once() call, including the profiling run (where it's called
        before starting the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in six.iteritems(dargs):
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                utils.cherry_pick_call(self.initialize, *args, **dargs)

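                # The exclusive flock below serializes test setup across any
                # concurrent autotest processes sharing this job tmpdir.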
                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'),
                                'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = utils.cherry_pick_args(self.setup,
                                                             args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(utils.cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = utils.cherry_pick_args(self.run_once,
                                                             args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in utils.get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = utils.cherry_pick_args(self.execute,
                                                             args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it now so the actual time of the error
                # is known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        six.reraise(exc_info[0], exc_info[1], exc_info[2])
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        utils.cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception as e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: URL of the new test.
        @param tag: Tag added to the test name.
        @param args: Args for the subtest.
        @param dargs: Dictionary with args for the subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If True, execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, main_testpath=test_basepath,
                                 *args, **dargs)

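    # Illustrative sketch (hypothetical test name): from a parent test's
    # run_once(), a nested test can be launched with, e.g.,
    #
    #     self.runsubtest('desktopui_SimpleLogin', tag='nested')
    #
    # Its output directory is nested under the parent's via main_testpath.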

def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.__code__.co_flags
        all_varnames += func.__code__.co_varnames[:func.__code__.co_argcount]

    # Check if the given args are accepted by at least one of the callables.
    if len(args) > 0:
        # Current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check if the given dargs are accepted by at least one of the callables.
    if len(dargs) > 0:
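        # 0x08 is CO_VARKEYWORDS: set when a callable accepts **kwargs, in
        # which case any keyword argument is acceptable to that callable.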
        if not all_co_flags & 0x08:
            # no func accepts *dargs, so:
            for param in dargs:
                if param not in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = open(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py',
               os.path.join(group_dir, name, '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception as e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job,
            url,
            tag,
            args,
            dargs,
            local_namespace={},
            global_namespace={},
            before_test_hook=None,
            after_test_hook=None,
            before_iteration_hook=None,
            after_iteration_hook=None,
            override_test_in_prog_file=None):
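    """Resolve, instantiate, and run a test inside the given job.

    Summary of the behavior implemented below: |url| is either a plain test
    name resolved under job.testdir (or job.site_testdir), or a '*.tar.bz2'
    package URL installed via _installtest(). |tag| is appended to the
    test's output directory name. |args| and |dargs| are forwarded to the
    test's _exec(). The before/after test hooks and the per-iteration hooks
    are callables that receive the test object; the after_test_hook runs
    even if the test fails.
    """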
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # If this is not a plain test name, download and install the specified
    # test.
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir.
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package.
        # The job object may be either a server side job or a client side job.
        # The 'install_pkg' method will be present only if it's a client side
        # job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # Continue as a fallback mechanism and see if the test code
                # already exists on the machine.
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('main_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec('import %s' % modulename, local_namespace, global_namespace)
        exec("mytest = %s(job, bindir, outputdir)" % classname,
             local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        if hasattr(job, 'force_full_log_collection'):
            mytest.force_full_log_collection = job.force_full_log_collection
        if override_test_in_prog_file:
            mytest.test_in_prog_file = override_test_in_prog_file
        mytest.success = False
        if before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # Use the register_*_iteration_hook methods to register the passed-in
        # hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook:
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)