#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""End-to-end test for afdo_prof_analysis."""


import json
import os
import shutil
import tempfile
import unittest
from datetime import date

from afdo_tools.bisection import afdo_prof_analysis as analysis


class ObjectWithFields(object):
    """Turns kwargs given to the constructor into fields on an object.

    Examples:
      x = ObjectWithFields(a=1, b=2)
      assert x.a == 1
      assert x.b == 2
    """

    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            setattr(self, key, val)


class AfdoProfAnalysisE2ETest(unittest.TestCase):
    """Class for end-to-end testing of AFDO Profile Analysis."""

    # nothing significant about the values; even vs. odd is just easier to
    # remember
    good_prof = {
        "func_a": ":1\n 1: 3\n 3: 5\n 5: 7\n",
        "func_b": ":3\n 3: 5\n 5: 7\n 7: 9\n",
        "func_c": ":5\n 5: 7\n 7: 9\n 9: 11\n",
        "func_d": ":7\n 7: 9\n 9: 11\n 11: 13\n",
        "good_func_a": ":11\n",
        "good_func_b": ":13\n",
    }

    bad_prof = {
        "func_a": ":2\n 2: 4\n 4: 6\n 6: 8\n",
        "func_b": ":4\n 4: 6\n 6: 8\n 8: 10\n",
        "func_c": ":6\n 6: 8\n 8: 10\n 10: 12\n",
        "func_d": ":8\n 8: 10\n 10: 12\n 12: 14\n",
        "bad_func_a": ":12\n",
        "bad_func_b": ":14\n",
    }

    expected = {
        "good_only_functions": False,
        "bad_only_functions": True,
        "bisect_results": {"ranges": [], "individuals": ["func_a"]},
    }

    def test_afdo_prof_analysis(self):
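        """Checks that bad individual functions and bad ranges are isolated."""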
        # Individual issues take precedence by the nature of our algorithm,
        # so they should be caught first.
        good = self.good_prof.copy()
        bad = self.bad_prof.copy()
        self.run_check(good, bad, self.expected)

        # Now remove the individual issue and the bad-only functions, and
        # check that the range is caught.
        bad["func_a"] = good["func_a"]
        bad.pop("bad_func_a")
        bad.pop("bad_func_b")

        expected_cp = self.expected.copy()
        expected_cp["bad_only_functions"] = False
        expected_cp["bisect_results"] = {
            "individuals": [],
            "ranges": [["func_b", "func_c", "func_d"]],
        }

        self.run_check(good, bad, expected_cp)

    def test_afdo_prof_state(self):
84        """Verifies that saved state is correct replication."""
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        good = self.good_prof.copy()
        bad = self.bad_prof.copy()
        # add more functions to the data
        for x in range(400):
            good["func_%d" % x] = ""
            bad["func_%d" % x] = ""

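        # mkstemp returns an open file descriptor along with the path; only
        # the paths are needed here, so close each fd right away.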
        fd_first, first_result = tempfile.mkstemp(dir=temp_dir)
        os.close(fd_first)
        fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
        os.close(fd_state)
        self.run_check(
            self.good_prof,
            self.bad_prof,
            self.expected,
            state_file=state_file,
            out_file=first_result,
        )

        fd_second, second_result = tempfile.mkstemp(dir=temp_dir)
        os.close(fd_second)
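        # The first run saves its finished state as
        # <state_file>.completed.<date>; resuming the second run from that
        # file should reproduce the first run's results.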
        completed_state_file = "%s.completed.%s" % (
            state_file,
            str(date.today()),
        )
        self.run_check(
            self.good_prof,
            self.bad_prof,
            self.expected,
            state_file=completed_state_file,
            no_resume=False,
            out_file=second_result,
        )

        with open(first_result) as f:
            initial_run = json.load(f)
        with open(second_result) as f:
            loaded_run = json.load(f)
        self.assertEqual(initial_run, loaded_run)

    def test_exit_on_problem_status(self):
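        """Verifies that a problem status from the external decider raises."""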
        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        fd_state, state_file = tempfile.mkstemp(dir=temp_dir)
        os.close(fd_state)
        with self.assertRaises(RuntimeError):
            self.run_check(
                self.good_prof,
                self.bad_prof,
                self.expected,
                state_file=state_file,
                extern_decider="problemstatus_external.sh",
            )

    def test_state_assumption(self):
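        """Verifies interrupted and resumed runs match an uninterrupted run."""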
        def compare_runs(tmp_dir, first_ctr, second_ctr):
145            """Compares given prof versions between first and second run in test."""
            first_prof = "%s/.first_run_%d" % (tmp_dir, first_ctr)
            second_prof = "%s/.second_run_%d" % (tmp_dir, second_ctr)
            with open(first_prof) as f:
                first_prof_text = f.read()
            with open(second_prof) as f:
                second_prof_text = f.read()
            self.assertEqual(first_prof_text, second_prof_text)

        good_prof = {"func_a": ":1\n3: 3\n5: 7\n"}
        bad_prof = {"func_a": ":2\n4: 4\n6: 8\n"}
        # add some noise to the profiles; 15 is an arbitrary choice
        for x in range(15):
            func = "func_%d" % x
            good_prof[func] = ":%d\n" % (x)
            bad_prof[func] = ":%d\n" % (x + 1)
        expected = {
            "bisect_results": {"ranges": [], "individuals": ["func_a"]},
            "good_only_functions": False,
            "bad_only_functions": False,
        }

        # Use a static temp dir rather than a dynamic one because these files
        # are shared between the bash scripts and this Python test, and the
        # arguments to the bash scripts are fixed by afdo_prof_analysis.py, so
        # it would be difficult to communicate a dynamically generated
        # directory to the scripts.
        scripts_tmp_dir = "%s/afdo_test_tmp" % os.getcwd()
        os.mkdir(scripts_tmp_dir)
        self.addCleanup(shutil.rmtree, scripts_tmp_dir, ignore_errors=True)

        # Files used by the bash scripts that serve as external deciders below:
        # - count_file tracks the total number of calls to the script
        # - local_count_file tracks the number of calls to the script since
        #   the last interruption
        count_file = "%s/.count" % scripts_tmp_dir
        local_count_file = "%s/.local_count" % scripts_tmp_dir

        # First, run through the whole analysis at once, uninterrupted.
        initial_seed = self.run_check(
            good_prof,
            bad_prof,
            expected,
            extern_decider="state_assumption_external.sh",
        )
        with open(count_file) as f:
            num_calls = int(f.read())
        os.remove(count_file)  # reset counts for second run
        finished_state_file = "afdo_analysis_state.json.completed.%s" % str(
            date.today()
        )
        self.addCleanup(os.remove, finished_state_file)

        # Now run the same analysis, but interrupt it on each iteration.
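        # 2 * num_calls + 1 is an upper bound on the number of restarts before
        # the test gives up (see the else clause below).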
        for i in range(2 * num_calls + 1):
            no_resume_run = i == 0
            seed = initial_seed if no_resume_run else None
            try:
                self.run_check(
                    good_prof,
                    bad_prof,
                    expected,
                    no_resume=no_resume_run,
                    extern_decider="state_assumption_interrupt.sh",
                    seed=seed,
                )
                break
            except RuntimeError:
                # The script was interrupted, so restart the local count.
                os.remove(local_count_file)
        else:
            raise RuntimeError("Test failed -- took too many iterations")

        for initial_ctr in range(3):  # initial runs unaffected by interruption
            compare_runs(scripts_tmp_dir, initial_ctr, initial_ctr)

        start = 3
        for ctr in range(start, num_calls):
            # The second-run counter advances by 4 for each increment of the
            # first-run counter: on top of the matching call, each restart adds
            # +2 for the initial checks on the good and bad profs, and +1 for
            # the PROBLEM_STATUS run which causes the error and restart.
            compare_runs(scripts_tmp_dir, ctr, 6 + (ctr - start) * 4)

    def run_check(
        self,
        good_prof,
        bad_prof,
        expected,
        state_file=None,
        no_resume=True,
        out_file=None,
        extern_decider=None,
        seed=None,
    ):
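        """Runs the analysis on the given profiles and checks its output.

        Returns the seed used by the analysis so a run can be replayed.
        """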

        temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

        good_prof_file = "%s/%s" % (temp_dir, "good_prof.txt")
        bad_prof_file = "%s/%s" % (temp_dir, "bad_prof.txt")
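        # Convert the profile dicts to their text representation and write
        # them to disk for the analysis to consume.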
        good_prof_text = analysis.json_to_text(good_prof)
        bad_prof_text = analysis.json_to_text(bad_prof)
        with open(good_prof_file, "w") as f:
            f.write(good_prof_text)
        with open(bad_prof_file, "w") as f:
            f.write(bad_prof_text)

        dir_path = os.path.dirname(
            os.path.realpath(__file__)
        )  # dir of this file
        external_script = "%s/%s" % (
            dir_path,
            extern_decider or "e2e_external.sh",
        )

        # FIXME: This test ideally shouldn't be writing to $PWD
        if state_file is None:
            state_file = "%s/afdo_analysis_state.json" % os.getcwd()

            def rm_state():
                try:
                    os.unlink(state_file)
                except OSError:
                    # Probably because the file does not exist. That's fine.
                    pass

            self.addCleanup(rm_state)

        actual = analysis.main(
            ObjectWithFields(
                good_prof=good_prof_file,
                bad_prof=bad_prof_file,
                external_decider=external_script,
                analysis_output_file=out_file or "/dev/null",
                state_file=state_file,
                no_resume=no_resume,
                remove_state_on_completion=False,
                seed=seed,
            )
        )
        actual_seed = actual.pop("seed")  # nothing to check
        self.assertEqual(actual, expected)
        return actual_seed


if __name__ == "__main__":
    unittest.main()