#!/usr/bin/python3
#pylint: disable-msg=C0111
"""Unit Tests for autotest.client.common_lib.test"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__author__ = 'gps@google.com (Gregory P. Smith)'

import json
import os
import shutil
import tempfile
import unittest

# 'common' sets up the autotest_lib import path and must be imported
# before any autotest_lib module.
import common
import mock as pymock
import six
from six.moves import range

from autotest_lib.client.common_lib import test
from autotest_lib.client.common_lib.test_utils import mock


class TestTestCase(unittest.TestCase):
    class _neutered_base_test(test.base_test):
        """A base_test subclass whose __init__ skips the parent constructor."""
        def __init__(self, *args, **kwargs):
            class MockJob(object):
                pass
            class MockProfilerManager(object):
                def active(self):
                    return False
                def present(self):
                    return True
            self.job = MockJob()
            self.job.default_profile_only = False
            self.job.profilers = MockProfilerManager()
            self.job.test_retry = 0
            self.job.fast = False
            self._new_keyval = False
            self.iteration = 0
            self.tagged_testname = 'neutered_base_test'
            self.before_iteration_hooks = []
            self.after_iteration_hooks = []

            # Make a temp dir in which the test-in-prog file will be created.
            self.crash_reporter_dir = tempfile.mkdtemp()
            self.test_in_prog_file = os.path.join(self.crash_reporter_dir,
                                                  "test-in-prog")

    def setUp(self):
        self.god = mock.mock_god()
        self.test = self._neutered_base_test()


    def tearDown(self):
        self.god.unstub_all()
        shutil.rmtree(self.test.crash_reporter_dir)


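# The tests below use mock_god's record/playback pattern: each
# expect_call() records one expected invocation, the code under test is
# run, and check_playback() then asserts that exactly the recorded
# calls happened, in order.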
class Test_base_test_execute(TestTestCase):
    """Test the various behaviors of the base_test.execute() method."""

    def setUp(self):
        TestTestCase.setUp(self)
        self.god.stub_function(self.test, 'run_once_profiling')
        self.god.stub_function(self.test, 'postprocess')
        self.god.stub_function(self.test, 'process_failed_constraints')


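    # Per the expectations below, _call_run_once's positional arguments
    # are (constraints, profile_only, postprocess_profiled_run, args,
    # dargs).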
    def test_call_run_once(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        self.god.stub_function(self.test, 'postprocess_iteration')
        self.god.stub_function(self.test, 'analyze_perf_constraints')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)

        # tests the test._call_run_once implementation
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val')
        self.test.postprocess_iteration.expect_call()
        self.test.analyze_perf_constraints.expect_call([])
        after_hook.expect_call(self.test)
        self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        self.god.check_playback()


    def test_call_run_once_with_exception(self):
        # setup
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.god.stub_function(self.test, 'run_once')
        before_hook = self.god.create_mock_function('before_hook')
        after_hook = self.god.create_mock_function('after_hook')
        self.test.register_before_iteration_hook(before_hook)
        self.test.register_after_iteration_hook(after_hook)
        error = Exception('fail')

        # tests the test._call_run_once implementation
        self.test.drop_caches_between_iterations.expect_call()
        before_hook.expect_call(self.test)
        self.test.run_once.expect_call(1, 2, arg='val').and_raises(error)
        after_hook.expect_call(self.test)
        try:
            self.test._call_run_once([], False, None, (1, 2), {'arg': 'val'})
        except Exception:
            pass
        self.god.check_playback()


    def _expect_call_run_once(self):
        self.test._call_run_once.expect_call((), False, None, (), {})


    def test_execute_test_length(self):
        # test that test_length overrides iterations and works.
        self.god.stub_function(self.test, '_call_run_once')

        self._expect_call_run_once()
        self._expect_call_run_once()
        self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        # A fake clock that returns 0, 1, 2, 3 on successive calls, so a
        # test_length of 3 "seconds" runs exactly the three iterations
        # expected above.
        if six.PY2:
            fake_time = iter(range(4)).next
        else:
            fake_time = iter(range(4)).__next__
        self.test.execute(iterations=1, test_length=3, _get_time=fake_time)
        self.god.check_playback()


    def test_execute_iterations(self):
        # test that iterations works.
        self.god.stub_function(self.test, '_call_run_once')

        iterations = 2
        for _ in range(iterations):
            self._expect_call_run_once()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(iterations=iterations)
        self.god.check_playback()


    def _mock_calls_for_execute_no_iterations(self):
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()


    def test_execute_iteration_zero(self):
        # test that iterations=0 works.
        self._mock_calls_for_execute_no_iterations()

        self.test.execute(iterations=0)
        self.god.check_playback()


    def test_execute_profile_only(self):
        # test that profile_only=True works.
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.drop_caches_between_iterations.expect_call()
        self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.execute(profile_only=True, iterations=2)
        self.god.check_playback()


    def test_execute_default_profile_only(self):
        # test that job.default_profile_only=True works.
        self.god.stub_function(self.test, 'drop_caches_between_iterations')
        for _ in range(3):
            self.test.drop_caches_between_iterations.expect_call()
            self.test.run_once_profiling.expect_call(None)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()
        self.test.job.default_profile_only = True
        self.test.execute(iterations=3)
        self.god.check_playback()


    def test_execute_postprocess_profiled_false(self):
        # test that postprocess_profiled_run=False works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, False, (), {})
        self.test.run_once_profiling.expect_call(False)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=False, iterations=1)
        self.god.check_playback()


    def test_execute_postprocess_profiled_true(self):
        # test that postprocess_profiled_run=True works
        self.god.stub_function(self.test, '_call_run_once')

        self.test._call_run_once.expect_call((), False, True, (), {})
        self.test.run_once_profiling.expect_call(True)
        self.test.postprocess.expect_call()
        self.test.process_failed_constraints.expect_call()

        self.test.execute(postprocess_profiled_run=True, iterations=1)
        self.god.check_playback()


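    # The tests below exercise base_test.output_perf_value(), which
    # records entries in <resultsdir>/results-chart.json.  Repeated
    # calls with the same description merge into a single
    # list_of_scalar_values entry, unless replace_existing_values=True
    # overwrites whatever is already there.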
    def test_output_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)

        expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
                           "value": 1, "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_perf_value_with_custom_resultsdir(self):
        self.test.resultsdir = tempfile.mkdtemp()

        resultsdir = self.test.resultsdir + "/tests/tmp"
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True,
                                    resultsdir=resultsdir)

        expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
                           "value": 1, "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/tests/tmp/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_twice(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms", higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)

        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2],
                           "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_three_times(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=True)
        self.test.output_perf_value("Test", 2, units="ms", higher_is_better=True)
        self.test.output_perf_value("Test", 3, units="ms", higher_is_better=True)

        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2, 3],
                           "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)

        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values", "values": [1, 2, 3],
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_then_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [1, 4, 3, 2],
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_list_then_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 3, 2], units="ms",
                                    higher_is_better=False)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [1, 2, 3, 4, 3, 2],
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", u'-0.34', units="ms",
                                    higher_is_better=True)

        expected_result = {"Test": {"summary": {"units": "ms", "type": "scalar",
                           "value": -0.34, "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


    def test_output_single_perf_value_input_list_of_string(self):
        self.test.resultsdir = tempfile.mkdtemp()

        self.test.output_perf_value("Test", [0, u'-0.34', 1], units="ms",
                                    higher_is_better=True)

        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [0, -0.34, 1],
                           "improvement_direction": "up"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_list_then_replace_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [4, 5, 6],
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_single_then_replace_list_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 3, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", [4, 5, 6], units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "list_of_scalar_values",
                           "values": [4, 5, 6],
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_list_then_replace_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", [1, 2, 3], units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", 4, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "scalar",
                           "value": 4,
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_single_then_replace_single_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test", 2, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        expected_result = {"Test": {"summary": {"units": "ms",
                           "type": "scalar",
                           "value": 2,
                           "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

    def test_output_perf_then_replace_certain_perf_value(self):
        self.test.resultsdir = tempfile.mkdtemp()
        self.test.output_perf_value("Test1", 1, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test2", 2, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test3", 3, units="ms",
                                    higher_is_better=False)
        self.test.output_perf_value("Test2", -1, units="ms",
                                    higher_is_better=False,
                                    replace_existing_values=True)
        expected_result = {"Test1": {"summary":
                                       {"units": "ms",
                                        "type": "scalar",
                                        "value": 1,
                                        "improvement_direction": "down"}},
                           "Test2": {"summary":
                                       {"units": "ms",
                                        "type": "scalar",
                                        "value": -1,
                                        "improvement_direction": "down"}},
                           "Test3": {"summary":
                                       {"units": "ms",
                                        "type": "scalar",
                                        "value": 3,
                                        "improvement_direction": "down"}}}
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))

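    # Passing graph=<name> groups values under a single named chart
    # rather than one top-level entry per description, as the expected
    # dict in the test below shows.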
    def test_chart_supplied(self):
        self.test.resultsdir = tempfile.mkdtemp()

        test_data = [("tcp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("tcp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 5),
                     ("tcp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("tcp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("tcp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
                     ("tcp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("udp_tx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("udp_tx", "ch006_mode11B_none", "BT_streaming_audiofile", 6),
                     ("udp_tx", "ch006_mode11B_none", "BT_disconnected_again", 0),
                     ("udp_rx", "ch006_mode11B_none", "BT_connected_but_not_streaming", 0),
                     ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 8),
                     ("udp_rx", "ch006_mode11B_none", "BT_streaming_audiofile", 9),
                     ("udp_rx", "ch006_mode11B_none", "BT_disconnected_again", 0)]

        for (config_tag, ap_config_tag, bt_tag, drop) in test_data:
            self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
                                        drop,
                                        units='percent_drop',
                                        higher_is_better=False,
                                        graph=ap_config_tag + '_drop')
        expected_result = {
          "ch006_mode11B_none_drop": {
            "udp_tx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 6.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_rx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 8.0,
              "improvement_direction": "down"
            },
            "udp_tx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_tx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "tcp_tx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 5.0,
              "improvement_direction": "down"
            },
            "tcp_rx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_connected_but_not_streaming_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            },
            "udp_rx_BT_streaming_audiofile_drop": {
              "units": "percent_drop",
              "type": "list_of_scalar_values",
              "values": [
                8.0,
                9.0
              ],
              "improvement_direction": "down"
            },
            "tcp_rx_BT_disconnected_again_drop": {
              "units": "percent_drop",
              "type": "scalar",
              "value": 0.0,
              "improvement_direction": "down"
            }
          }
        }
        self.maxDiff = None
        with open(self.test.resultsdir + "/results-chart.json") as f:
            self.assertDictEqual(expected_result, json.loads(f.read()))


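# Test_runtest exercises test.runtest() end to end: setUp() writes the
# mocktest module below into a scratch job.testdir, and the test then
# verifies that runtest() loads it and routes arguments to its entry
# points.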
class Test_runtest(unittest.TestCase):
    _TEST_CONTENTS = """
from autotest_lib.client.common_lib import test

class mocktest(test.base_test):
    version = 1
    def initialize(self, host, arg1=None):
        self.job.initialize_mock(host, arg1)

    def warmup(self, host):
        self.job.warmup_mock(host)

    def run_once(self, arg2):
        self.job.run_once_mock(arg2)

    def cleanup(self, **kwargs):
        self.job.cleanup_mock(**kwargs)
    """

    def setUp(self):
        self.workdir = tempfile.mkdtemp()
        self.testname = 'mocktest'
        testdir = os.path.join(self.workdir, 'tests')
        resultdir = os.path.join(self.workdir, 'results')
        tmpdir = os.path.join(self.workdir, 'tmp')

        self.test_in_prog_file = os.path.join(tmpdir, "test-in-prog")

        os.makedirs(os.path.join(testdir, self.testname))
        os.makedirs(os.path.join(resultdir, self.testname))
        os.makedirs(tmpdir)

        self.job = pymock.MagicMock(testdir=testdir, resultdir=resultdir,
                                    tmpdir=tmpdir, site_testdir=None)

        with open(os.path.join(self.job.testdir, self.testname,
                  '{}.py'.format(self.testname)), 'w') as f:
            f.write(self._TEST_CONTENTS)

    def tearDown(self):
        shutil.rmtree(self.workdir)

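    # runtest() dispatches entries from all_args by parameter name: per
    # the assertions below, initialize() receives host/arg1, warmup()
    # receives host, run_once() receives arg2, and cleanup() receives
    # the full dict.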
    def test_runtest(self):
        all_args = {'host': 'hostvalue', 'arg1': 'value1', 'arg2': 'value2'}
        test.runtest(self.job,
                     self.testname,
                     '', (),
                     all_args,
                     override_test_in_prog_file=self.test_in_prog_file)
        self.job.initialize_mock.assert_called_with('hostvalue', 'value1')
        self.job.warmup_mock.assert_called_with('hostvalue')
        self.job.run_once_mock.assert_called_with('value2')
        self.job.cleanup_mock.assert_called_with(**all_args)


if __name__ == '__main__':
    unittest.main()