xref: /aosp_15_r20/external/toolchain-utils/crosperf/experiment_status.py (revision 760c253c1ed00ce9abd48f8546f08516e57485fe)
# -*- coding: utf-8 -*-
# Copyright 2011 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""The class to show the experiment status banner."""


import collections
import datetime
import time


class ExperimentStatus(object):
    """Tracks and reports the status of an experiment's benchmark runs."""

    def __init__(self, experiment):
        self.experiment = experiment
        self.num_total = len(self.experiment.benchmark_runs)
        self.completed = 0
        self.new_job_start_time = time.time()
        self.log_level = experiment.log_level

    def _GetProgressBar(self, num_complete, num_total):
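        """Return a completion percentage and a 50-character text progress bar."""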
        ret = "Done: %s%%" % int(100.0 * num_complete / num_total)
        bar_length = 50
        done_char = ">"
        undone_char = " "
        num_complete_chars = bar_length * num_complete // num_total
        num_undone_chars = bar_length - num_complete_chars
        ret += " [%s%s]" % (
            num_complete_chars * done_char,
            num_undone_chars * undone_char,
        )
        return ret

    def GetProgressString(self):
        """Return elapsed time, ETA, and a progress bar as a multi-line string."""
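        # Example output (values are illustrative):
        #   Current time: 2024-01-01 12:00:00.123456 Elapsed: 0:06:40 ETA: 0:09:10
        #   Done: 40% [>>>>>>>>>>>>>>>>>>>>                              ]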
        current_time = time.time()
        if self.experiment.start_time:
            elapsed_time = current_time - self.experiment.start_time
        else:
            elapsed_time = 0
        try:
            if self.completed != self.experiment.num_complete:
                self.completed = self.experiment.num_complete
                self.new_job_start_time = current_time
            time_completed_jobs = elapsed_time - (
                current_time - self.new_job_start_time
            )
            # eta is calculated as:
            #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
            #          + time_left_for_current_job
            #
            #   where
            #        num_jobs_not_yet_started = (num_total - num_complete - 1)
            #
            #        estimated_time_per_job = time_completed_jobs / num_run_complete
            #
            #        time_left_for_current_job = estimated_time_per_job -
            #                                    time_spent_so_far_on_current_job
            #
            #  The biggest problem with this calculation is its assumption that
            #  all jobs have roughly the same running time (blatantly false!).
            #
            #  ETA can come out negative if the time spent on the current job is
            #  greater than the estimated time per job (e.g. you're running the
            #  first long job, after a series of short jobs).  For now, if that
            #  happens, we set the ETA to "Unknown."
            #
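            #  Worked example (hypothetical numbers): with num_total = 10,
            #  num_complete = num_run_complete = 4, 400s of elapsed time, and
            #  20s already spent on the current job:
            #      time_completed_jobs    = 400 - 20 = 380
            #      estimated_time_per_job = 380 / 4  = 95
            #      ETA = (10 - 4 - 1) * 95 + (95 - 20) = 550 seconds
            #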
            eta_seconds = float(
                self.num_total - self.experiment.num_complete - 1
            ) * time_completed_jobs / self.experiment.num_run_complete + (
                time_completed_jobs / self.experiment.num_run_complete
                - (current_time - self.new_job_start_time)
            )

            eta_seconds = int(eta_seconds)
            if eta_seconds > 0:
                eta = datetime.timedelta(seconds=eta_seconds)
            else:
                eta = "Unknown"
        except ZeroDivisionError:
            eta = "Unknown"
        strings = []
        strings.append(
            "Current time: %s Elapsed: %s ETA: %s"
            % (
                datetime.datetime.now(),
                datetime.timedelta(seconds=int(elapsed_time)),
                eta,
            )
        )
        strings.append(
            self._GetProgressBar(self.experiment.num_complete, self.num_total)
        )
        return "\n".join(strings)

    def GetStatusString(self):
        """Get the status string of all the benchmark_runs."""
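        # Example output (statuses and benchmark names are illustrative):
        #   RUNNING:  2 ('octane' 0:02:13, 'kraken' 0:00:47)
        #   SUCCEEDED:  3
        #     image1: octane [1,2], kraken [1]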
        status_bins = collections.defaultdict(list)
        for benchmark_run in self.experiment.benchmark_runs:
            status_bins[benchmark_run.timeline.GetLastEvent()].append(
                benchmark_run
            )

        status_strings = []
        for key, val in status_bins.items():
            if key == "RUNNING":
                get_description = self._GetNamesAndIterations
            else:
                get_description = self._GetCompactNamesAndIterations
            status_strings.append("%s: %s" % (key, get_description(val)))

        thread_status = ""
        thread_status_format = "Thread Status: \n{}\n"
        if (
            self.experiment.schedv2() is None
            and self.experiment.log_level == "verbose"
        ):
            # Add the machine manager status.
            thread_status = thread_status_format.format(
                self.experiment.machine_manager.AsString()
            )
        elif self.experiment.schedv2():
            # In schedv2 mode, we always print out thread status.
            thread_status = thread_status_format.format(
                self.experiment.schedv2().threads_status_as_string()
            )

        result = "{}{}".format(thread_status, "\n".join(status_strings))

        return result

    def _GetNamesAndIterations(self, benchmark_runs):
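        """List each run with time elapsed since its last timeline event."""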
        strings = []
        t = time.time()
        for benchmark_run in benchmark_runs:
            t_last = benchmark_run.timeline.GetLastEventTime()
            elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
            strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
        return " %s (%s)" % (len(strings), ", ".join(strings))

    def _GetCompactNamesAndIterations(self, benchmark_runs):
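        """Summarize runs per label as 'benchmark [iteration,...]' entries."""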
        grouped_benchmarks = collections.defaultdict(list)
        for benchmark_run in benchmark_runs:
            grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)

        output_segs = []
        for label_name, label_runs in grouped_benchmarks.items():
            strings = []
            benchmark_iterations = collections.defaultdict(list)
            for benchmark_run in label_runs:
                assert benchmark_run.label.name == label_name
                benchmark_name = benchmark_run.benchmark.name
                benchmark_iterations[benchmark_name].append(
                    benchmark_run.iteration
                )
            for key, val in benchmark_iterations.items():
                val.sort()
                iterations = ",".join(str(v) for v in val)
                strings.append("{} [{}]".format(key, iterations))
            output_segs.append(
                "  " + label_name + ": " + ", ".join(strings) + "\n"
            )

        return " %s \n%s" % (len(benchmark_runs), "".join(output_segs))