# -*- coding: utf-8 -*-
# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to handle the report format."""

import datetime
import functools
import itertools
import json
import os
import re
import time

from column_chart import ColumnChart
from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from results_organizer import OrganizeResults
import results_report_templates as templates
from update_telemetry_defaults import TelemetryDefaults


def ParseChromeosImage(chromeos_image):
    """Parse the chromeos_image string for the image and version.

    The chromeos_image string will probably be in one of two formats:
    1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
       chromiumos_test_image.bin
    2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
       chromiumos_test_image.bin

    We parse these strings to find the 'chromeos_version' to store in the
    json archive (without the .<datetime> suffix in the first case), and also
    the 'chromeos_image', which is the whole string in the first case, but
    only the part after '/chroot/tmp' in the second case.

    Args:
      chromeos_image: string containing the path to the chromeos_image that
        crosperf used for the test.

    Returns:
      version, image: The results of parsing the input string, as explained
      above.
    """
    # Find the ChromeOS version, e.g. R45-2345.0.0.....
    # chromeos_image should have been something like:
    # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
    if chromeos_image.endswith("/chromiumos_test_image.bin"):
        full_version = chromeos_image.split("/")[-2]
        # Strip the date and time off of local builds (which have the format
        # "R43-2345.0.0.date-and-time").
        version, _ = os.path.splitext(full_version)
    else:
        version = ""

    # Find the chromeos image.  If it's somewhere in .../chroot/tmp/..., then
    # it's an official image that got downloaded, so chop off the download path
    # to make the official image name more clear.
    official_image_path = "/chroot/tmp"
    if official_image_path in chromeos_image:
        image = chromeos_image.split(official_image_path, 1)[1]
    else:
        image = chromeos_image
    return version, image


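# Illustrative examples for ParseChromeosImage(); the paths below are made up
# and the exact datetime suffix format is an assumption:
#
#   # Case 1: a local build. The full path is kept as the image, and
#   # os.path.splitext() drops the trailing ".<datetime>" from the version:
#   ParseChromeosImage(
#       "/home/user/chromeos/src/build/images/eve/"
#       "R99-1234.0.0.2022_01_01_000000/chromiumos_test_image.bin")
#   # -> ("R99-1234.0.0", <the input path, unchanged>)
#
#   # Case 2: an official image downloaded under .../chroot/tmp/...; only the
#   # part after "/chroot/tmp" is kept as the image:
#   ParseChromeosImage(
#       "/home/user/chromeos/chroot/tmp/eve-release/R99-1234.0.0/"
#       "chromiumos_test_image.bin")
#   # -> image is "/eve-release/R99-1234.0.0/chromiumos_test_image.bin"; the
#   # version is again taken from the parent directory name (note that
#   # os.path.splitext() strips everything after the last dot, leaving
#   # "R99-1234.0" here).

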
def _AppendUntilLengthIs(gen, the_list, target_len):
    """Appends to `the_list` until it is `target_len` elements long.

    Uses `gen` to generate elements.
    """
    the_list.extend(gen() for _ in range(target_len - len(the_list)))
    return the_list


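# For reference, `gen` is typically a constructor such as the `list` or `dict`
# builtins (see _PerfTable._ProcessPerfReport below), e.g.:
#
#   _AppendUntilLengthIs(list, [], 3)    # -> [[], [], []]
#   _AppendUntilLengthIs(dict, [{}], 3)  # -> [{}, {}, {}]

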
def _FilterPerfReport(event_threshold, report):
    """Filters out entries with `< event_threshold` percent in a perf report."""

    def filter_dict(m):
        return {
            fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
        }

    return {event: filter_dict(m) for event, m in report.items()}


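# A small, hand-written example for _FilterPerfReport() (percentages invented;
# real reports come from ParseStandardPerfReport() below):
#
#   report = {"cycles": {"foo": 12.5, "bar": 0.3},
#             "instructions": {"baz": 1.0}}
#   _FilterPerfReport(1.0, report)
#   # -> {"cycles": {"foo": 12.5}, "instructions": {"baz": 1.0}}

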
class _PerfTable(object):
    """Generates dicts from a perf table.

    Dicts look like:
    {'benchmark_name': {'perf_event_name': [LabelData]}}
    where LabelData is a list of perf dicts, each perf dict coming from the same
    label.
    Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
    percentage of time spent in function_name).
    """

    def __init__(
        self,
        benchmark_names_and_iterations,
        label_names,
        read_perf_report,
        event_threshold=None,
    ):
        """Constructor.

        read_perf_report is a function that takes a label name, benchmark name, and
        benchmark iteration, and returns a dictionary describing the perf output for
        that given run.
        """
        self.event_threshold = event_threshold
        self._label_indices = {name: i for i, name in enumerate(label_names)}
        self.perf_data = {}
        for label in label_names:
            for bench_name, bench_iterations in benchmark_names_and_iterations:
                for i in range(bench_iterations):
                    report = read_perf_report(label, bench_name, i)
                    self._ProcessPerfReport(report, label, bench_name, i)

    def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
        """Add the data from one run to the dict."""
        perf_of_run = perf_report
        if self.event_threshold is not None:
            perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
        if benchmark_name not in self.perf_data:
            self.perf_data[benchmark_name] = {
                event: [] for event in perf_of_run
            }
        ben_data = self.perf_data[benchmark_name]
        label_index = self._label_indices[label]
        for event in ben_data:
            _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
            data_for_label = ben_data[event][label_index]
            _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
            data_for_label[iteration] = (
                perf_of_run[event] if perf_of_run else {}
            )


def _GetResultsTableHeader(ben_name, iterations):
    benchmark_info = "Benchmark:  {0};  Iterations: {1}".format(
        ben_name, iterations
    )
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]


def _GetDSOHeader(cwp_dso):
    info = "CWP_DSO: %s" % cwp_dso
    cell = Cell()
    cell.string_value = info
    cell.header = False
    return [[cell]]


def _ParseColumn(columns, iteration):
    new_column = []
    for column in columns:
        if column.result.__class__.__name__ != "RawResult":
            new_column.append(column)
        else:
            new_column.extend(
                Column(LiteralResult(i), Format(), str(i + 1))
                for i in range(iteration)
            )
    return new_column


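# Note on _ParseColumn(): a Column(RawResult(), Format()) placeholder is
# expanded into one LiteralResult column per iteration (labeled "1", "2", ...),
# so a "full" table shows every run individually; all other columns are passed
# through unchanged.

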
def _GetTables(benchmark_results, columns, table_type):
    iter_counts = benchmark_results.iter_counts
    result = benchmark_results.run_keyvals
    tables = []
    for bench_name, runs in result.items():
        iterations = iter_counts[bench_name]
        ben_table = _GetResultsTableHeader(bench_name, iterations)

        all_runs_empty = all(
            not keyvals for label in runs for keyvals in label
        )
        if all_runs_empty:
            cell = Cell()
            cell.string_value = (
                "This benchmark contains no result."
                " Is the benchmark name valid?"
            )
            cell_table = [[cell]]
        else:
            table = TableGenerator(
                runs, benchmark_results.label_names
            ).GetTable()
            parsed_columns = _ParseColumn(columns, iterations)
            tf = TableFormatter(table, parsed_columns)
            cell_table = tf.GetCellTable(table_type)
        tables.append(ben_table)
        tables.append(cell_table)
    return tables


def _GetPerfTables(benchmark_results, columns, table_type):
    p_table = _PerfTable(
        benchmark_results.benchmark_names_and_iterations,
        benchmark_results.label_names,
        benchmark_results.read_perf_report,
    )

    tables = []
    for benchmark in p_table.perf_data:
        iterations = benchmark_results.iter_counts[benchmark]
        ben_table = _GetResultsTableHeader(benchmark, iterations)
        tables.append(ben_table)
        benchmark_data = p_table.perf_data[benchmark]
        table = []
        for event in benchmark_data:
            tg = TableGenerator(
                benchmark_data[event],
                benchmark_results.label_names,
                sort=TableGenerator.SORT_BY_VALUES_DESC,
            )
            table = tg.GetTable(ResultsReport.PERF_ROWS)
            parsed_columns = _ParseColumn(columns, iterations)
            tf = TableFormatter(table, parsed_columns)
            tf.GenerateCellTable(table_type)
            tf.AddColumnName()
            tf.AddLabelName()
            tf.AddHeader(str(event))
            table = tf.GetCellTable(table_type, headers=False)
            tables.append(table)
    return tables


def _GetSamplesTables(benchmark_results, columns, table_type):
    tables = []
    dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
    tables.append(dso_header_table)
    (table, new_keyvals, iter_counts) = SamplesTableGenerator(
        benchmark_results.run_keyvals,
        benchmark_results.label_names,
        benchmark_results.iter_counts,
        benchmark_results.weights,
    ).GetTable()
    parsed_columns = _ParseColumn(columns, 1)
    tf = TableFormatter(table, parsed_columns, samples_table=True)
    cell_table = tf.GetCellTable(table_type)
    tables.append(cell_table)
    return (tables, new_keyvals, iter_counts)


class ResultsReport(object):
    """Class to handle the report format."""

    MAX_COLOR_CODE = 255
    PERF_ROWS = 5

    def __init__(self, results):
        self.benchmark_results = results

    def _GetTablesWithColumns(self, columns, table_type, summary_type):
        if summary_type == "perf":
            get_tables = _GetPerfTables
        elif summary_type == "samples":
            get_tables = _GetSamplesTables
        else:
            get_tables = _GetTables
        ret = get_tables(self.benchmark_results, columns, table_type)
        # If we are generating a samples summary table, get_tables returns a
        # tuple; update benchmark_results with the composite-benchmark keyvals
        # and iteration counts so that the full table can use them.
        if isinstance(ret, tuple):
            self.benchmark_results.run_keyvals = ret[1]
            self.benchmark_results.iter_counts = ret[2]
            ret = ret[0]
        return ret

    def GetFullTables(self, perf=False):
        ignore_min_max = self.benchmark_results.ignore_min_max
        columns = [
            Column(RawResult(), Format()),
            Column(MinResult(), Format()),
            Column(MaxResult(), Format()),
            Column(AmeanResult(ignore_min_max), Format()),
            Column(StdResult(ignore_min_max), Format(), "StdDev"),
            Column(
                CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
            ),
            Column(
                GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
            ),
            Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
        ]
        return self._GetTablesWithColumns(columns, "full", perf)

    def GetSummaryTables(self, summary_type=""):
        ignore_min_max = self.benchmark_results.ignore_min_max
        columns = []
        if summary_type == "samples":
            columns += [
                Column(IterationResult(), Format(), "Iterations [Pass:Fail]")
            ]
        columns += [
            Column(
                AmeanResult(ignore_min_max),
                Format(),
                "Weighted Samples Amean" if summary_type == "samples" else "",
            ),
            Column(StdResult(ignore_min_max), Format(), "StdDev"),
            Column(
                CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
            ),
            Column(
                GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
            ),
            Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
        ]
        return self._GetTablesWithColumns(columns, "summary", summary_type)


def _PrintTable(tables, out_to):
    # tables may be None.
    if not tables:
        return ""

    if out_to == "HTML":
        out_type = TablePrinter.HTML
    elif out_to == "PLAIN":
        out_type = TablePrinter.PLAIN
    elif out_to == "CONSOLE":
        out_type = TablePrinter.CONSOLE
    elif out_to == "TSV":
        out_type = TablePrinter.TSV
    elif out_to == "EMAIL":
        out_type = TablePrinter.EMAIL
    else:
        raise ValueError("Invalid out_to value: %s" % (out_to,))

    printers = (TablePrinter(table, out_type) for table in tables)
    return "".join(printer.Print() for printer in printers)


class TextResultsReport(ResultsReport):
    """Class to generate a text results report."""

    H1_STR = "==========================================="
    H2_STR = "-------------------------------------------"

    def __init__(self, results, email=False, experiment=None):
        super(TextResultsReport, self).__init__(results)
        self.email = email
        self.experiment = experiment

    @staticmethod
    def _MakeTitle(title):
        header_line = TextResultsReport.H1_STR
        # '' at the end gives one newline.
        return "\n".join([header_line, title, header_line, ""])

    @staticmethod
    def _MakeSection(title, body):
        header_line = TextResultsReport.H2_STR
        # '\n' at the end gives us two newlines.
        return "\n".join([header_line, title, header_line, body, "\n"])

    @staticmethod
    def FromExperiment(experiment, email=False):
        results = BenchmarkResults.FromExperiment(experiment)
        return TextResultsReport(results, email, experiment)

    def GetStatusTable(self):
        """Generate the status table by the tabulator."""
        table = [["", ""]]
        columns = [
            Column(LiteralResult(iteration=0), Format(), "Status"),
            Column(LiteralResult(iteration=1), Format(), "Failing Reason"),
        ]

        for benchmark_run in self.experiment.benchmark_runs:
            status = [
                benchmark_run.name,
                [
                    benchmark_run.timeline.GetLastEvent(),
                    benchmark_run.failure_reason,
                ],
            ]
            table.append(status)
        cell_table = TableFormatter(table, columns).GetCellTable("status")
        return [cell_table]

    def GetTotalWaitCooldownTime(self):
        """Get cooldown wait time in seconds from experiment benchmark runs.

        Returns:
          Dictionary {'dut': int(wait_time_in_seconds)}
        """
        waittime_dict = {}
        for dut in self.experiment.machine_manager.GetMachines():
            waittime_dict[dut.name] = dut.GetCooldownWaitTime()
        return waittime_dict

    def GetReport(self):
        """Generate the report for email and console."""
        output_type = "EMAIL" if self.email else "CONSOLE"
        experiment = self.experiment

        sections = []
        if experiment is not None:
            title_contents = "Results report for '%s'" % (experiment.name,)
        else:
            title_contents = "Results report"
        sections.append(self._MakeTitle(title_contents))

        if not self.benchmark_results.cwp_dso:
            summary_table = _PrintTable(self.GetSummaryTables(), output_type)
        else:
            summary_table = _PrintTable(
                self.GetSummaryTables(summary_type="samples"), output_type
            )
        sections.append(self._MakeSection("Summary", summary_table))

        if experiment is not None:
            table = _PrintTable(self.GetStatusTable(), output_type)
            sections.append(self._MakeSection("Benchmark Run Status", table))

        if not self.benchmark_results.cwp_dso:
            perf_table = _PrintTable(
                self.GetSummaryTables(summary_type="perf"), output_type
            )
            sections.append(self._MakeSection("Perf Data", perf_table))

        if experiment is not None:
            experiment_file = experiment.experiment_file
            sections.append(
                self._MakeSection("Experiment File", experiment_file)
            )

            cpu_info = experiment.machine_manager.GetAllCPUInfo(
                experiment.labels
            )
            sections.append(self._MakeSection("CPUInfo", cpu_info))

            totaltime = (
                (time.time() - experiment.start_time)
                if experiment.start_time
                else 0
            )
            totaltime_str = "Total experiment time:\n%d min" % (totaltime // 60)
            cooldown_waittime_list = ["Cooldown wait time:"]
            # When running an experiment on multiple DUTs, the cooldown wait
            # time may vary across devices; the combined total may even exceed
            # the total experiment time, which looks odd but is expected.
            # For that reason, report the cooldown time per DUT.
            for dut, waittime in sorted(
                self.GetTotalWaitCooldownTime().items()
            ):
                cooldown_waittime_list.append(
                    "DUT %s: %d min" % (dut, waittime // 60)
                )
            cooldown_waittime_str = "\n".join(cooldown_waittime_list)
            sections.append(
                self._MakeSection(
                    "Duration",
                    "\n\n".join([totaltime_str, cooldown_waittime_str]),
                )
            )

        return "\n".join(sections)


def _GetHTMLCharts(label_names, test_results):
    charts = []
    for item, runs in test_results.items():
        # Fun fact: label_names is actually *entirely* useless as a param, since we
        # never add headers. We still need to pass it anyway.
        table = TableGenerator(runs, label_names).GetTable()
        columns = [
            Column(AmeanResult(), Format()),
            Column(MinResult(), Format()),
            Column(MaxResult(), Format()),
        ]
        tf = TableFormatter(table, columns)
        data_table = tf.GetCellTable("full", headers=False)

        for cur_row_data in data_table:
            test_key = cur_row_data[0].string_value
            title = "{0}: {1}".format(item, test_key.replace("/", ""))
            chart = ColumnChart(title, 300, 200)
            chart.AddColumn("Label", "string")
            chart.AddColumn("Average", "number")
            chart.AddColumn("Min", "number")
            chart.AddColumn("Max", "number")
            chart.AddSeries("Min", "line", "black")
            chart.AddSeries("Max", "line", "black")
            cur_index = 1
            for label in label_names:
                chart.AddRow(
                    [
                        label,
                        cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value,
                    ]
                )
                if isinstance(cur_row_data[cur_index].value, str):
                    chart = None
                    break
                cur_index += 3
            if chart:
                charts.append(chart)
    return charts


class HTMLResultsReport(ResultsReport):
    """Class to generate an HTML results report."""

    def __init__(self, benchmark_results, experiment=None):
        super(HTMLResultsReport, self).__init__(benchmark_results)
        self.experiment = experiment

    @staticmethod
    def FromExperiment(experiment):
        return HTMLResultsReport(
            BenchmarkResults.FromExperiment(experiment), experiment=experiment
        )

    def GetReport(self):
        label_names = self.benchmark_results.label_names
        test_results = self.benchmark_results.run_keyvals
        charts = _GetHTMLCharts(label_names, test_results)
        chart_javascript = "".join(chart.GetJavascript() for chart in charts)
        chart_divs = "".join(chart.GetDiv() for chart in charts)

        if not self.benchmark_results.cwp_dso:
            summary_table = self.GetSummaryTables()
            perf_table = self.GetSummaryTables(summary_type="perf")
        else:
            summary_table = self.GetSummaryTables(summary_type="samples")
            perf_table = None
        full_table = self.GetFullTables()

        experiment_file = ""
        if self.experiment is not None:
            experiment_file = self.experiment.experiment_file
        # Use kwargs for code readability, and so that testing is a bit easier.
        return templates.GenerateHTMLPage(
            perf_table=perf_table,
            chart_js=chart_javascript,
            summary_table=summary_table,
            print_table=_PrintTable,
            chart_divs=chart_divs,
            full_table=full_table,
            experiment_file=experiment_file,
        )


def ParseStandardPerfReport(report_data):
    """Parses the output of `perf report`.

    It'll parse the following:
    {{garbage}}
    # Samples: 1234M of event 'foo'

    1.23% command shared_object location function::name

    1.22% command shared_object location function2::name

    # Samples: 999K of event 'bar'

    0.23% command shared_object location function3::name
    {{etc.}}

    Into:
      {'foo': {'function::name': 1.23, 'function2::name': 1.22},
       'bar': {'function3::name': 0.23, etc.}}
    """
    # This function fails silently if it's handed a string (as opposed to a
    # list of lines). So, auto-split if we do happen to get a string.
    if isinstance(report_data, str):
        report_data = report_data.splitlines()
    # Reject bytes explicitly; callers must decode the report to str first.
    elif isinstance(report_data, bytes):
        raise TypeError()

    # Samples: N{K,M,G} of event 'event-name'
    samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")

    # We expect lines like:
    # N.NN%  samples  command  shared_object  [location] symbol
    #
    # Note that we're looking at stripped lines, so there is no space at the
    # start.
    perf_regex = re.compile(
        r"^(\d+(?:\.\d*)?)%"  # N.NN%
        r"\s*\d+"  # samples count (ignored)
        r"\s*\S+"  # command (ignored)
        r"\s*\S+"  # shared_object (ignored)
        r"\s*\[.\]"  # location (ignored)
        r"\s*(\S.+)"  # function
    )

    stripped_lines = (l.strip() for l in report_data)
    nonempty_lines = (l for l in stripped_lines if l)
    # Ignore all lines before the first samples_regex match.
    interesting_lines = itertools.dropwhile(
        lambda x: not samples_regex.match(x), nonempty_lines
    )

    first_sample_line = next(interesting_lines, None)
    # Went through the entire file without finding a 'samples' header. Quit.
    if first_sample_line is None:
        return {}

    sample_name = samples_regex.match(first_sample_line).group(1)
    current_result = {}
    results = {sample_name: current_result}
    for line in interesting_lines:
        samples_match = samples_regex.match(line)
        if samples_match:
            sample_name = samples_match.group(1)
            current_result = {}
            results[sample_name] = current_result
            continue

        match = perf_regex.match(line)
        if not match:
            continue
        percentage_str, func_name = match.groups()
        try:
            percentage = float(percentage_str)
        except ValueError:
            # Couldn't parse it; try to be "resilient".
            continue
        current_result[func_name] = percentage
    return results


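# Example input/output for ParseStandardPerfReport(), using hand-shortened
# lines (real `perf report` output has a longer header and wider columns):
#
#   lines = [
#       "# Samples: 10K of event 'cycles'",
#       "1.23%  10  chrome  libfoo.so  [.] function::name",
#       "0.89%   7  chrome  libfoo.so  [.] function2::name",
#       "# Samples: 2K of event 'instructions'",
#       "0.23%   2  chrome  libbar.so  [.] function3::name",
#   ]
#   ParseStandardPerfReport(lines)
#   # -> {'cycles': {'function::name': 1.23, 'function2::name': 0.89},
#   #     'instructions': {'function3::name': 0.23}}

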
def _ReadExperimentPerfReport(
    results_directory, label_name, benchmark_name, benchmark_iteration
):
    """Reads a perf report for the given benchmark. Returns {} on failure.

    The result should be a map of maps; it should look like:
    {perf_event_name: {function_name: pct_time_spent}}, e.g.
    {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
    """
    raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
    dir_name = "".join(c for c in raw_dir_name if c.isalnum())
    file_name = os.path.join(results_directory, dir_name, "perf.data.report.0")
    try:
        with open(file_name) as in_file:
            return ParseStandardPerfReport(in_file)
    except IOError:
        # Yes, we swallow any IO-related errors.
        return {}


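# For example (illustrative names only), label "my-label", benchmark "octane",
# iteration index 0 is read from
# <results_directory>/mylabeloctane1/perf.data.report.0, because raw_dir_name
# is "my-labeloctane1" and the non-alphanumeric "-" is dropped.

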
# Split out so that testing (specifically: mocking) is easier
def _ExperimentToKeyvals(experiment, for_json_report):
    """Converts an experiment to keyvals."""
    return OrganizeResults(
        experiment.benchmark_runs,
        experiment.labels,
        json_report=for_json_report,
    )


class BenchmarkResults(object):
    """The minimum set of fields that any ResultsReport will take."""

    def __init__(
        self,
        label_names,
        benchmark_names_and_iterations,
        run_keyvals,
        ignore_min_max=False,
        read_perf_report=None,
        cwp_dso=None,
        weights=None,
    ):
        if read_perf_report is None:

            def _NoPerfReport(*_args, **_kwargs):
                return {}

            read_perf_report = _NoPerfReport

        self.label_names = label_names
        self.benchmark_names_and_iterations = benchmark_names_and_iterations
        self.iter_counts = dict(benchmark_names_and_iterations)
        self.run_keyvals = run_keyvals
        self.ignore_min_max = ignore_min_max
        self.read_perf_report = read_perf_report
        self.cwp_dso = cwp_dso
        self.weights = dict(weights) if weights else None

    @staticmethod
    def FromExperiment(experiment, for_json_report=False):
        label_names = [label.name for label in experiment.labels]
        benchmark_names_and_iterations = [
            (benchmark.name, benchmark.iterations)
            for benchmark in experiment.benchmarks
        ]
        run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
        ignore_min_max = experiment.ignore_min_max
        read_perf_report = functools.partial(
            _ReadExperimentPerfReport, experiment.results_directory
        )
        cwp_dso = experiment.cwp_dso
        weights = [
            (benchmark.name, benchmark.weight)
            for benchmark in experiment.benchmarks
        ]
        return BenchmarkResults(
            label_names,
            benchmark_names_and_iterations,
            run_keyvals,
            ignore_min_max,
            read_perf_report,
            cwp_dso,
            weights,
        )


def _GetElemByName(name, from_list):
    """Gets an element from the given list by its name field.

    Raises an error if it doesn't find exactly one match.
    """
    elems = [e for e in from_list if e.name == name]
    if len(elems) != 1:
        raise ValueError(
            "Expected 1 item named %s, found %d" % (name, len(elems))
        )
    return elems[0]


def _Unlist(l):
    """If l is a list, extracts the first element of l. Otherwise, returns l."""
    return l[0] if isinstance(l, list) else l


class JSONResultsReport(ResultsReport):
    """Class that generates JSON reports for experiments."""

    def __init__(
        self,
        benchmark_results,
        benchmark_date=None,
        benchmark_time=None,
        experiment=None,
        json_args=None,
    ):
        """Construct a JSONResultsReport.

        json_args is the dict of arguments we pass to json.dumps in GetReport().
        """
        super(JSONResultsReport, self).__init__(benchmark_results)

        defaults = TelemetryDefaults()
        defaults.ReadDefaultsFile()
        summary_field_defaults = defaults.GetDefault()
        if summary_field_defaults is None:
            summary_field_defaults = {}
        self.summary_field_defaults = summary_field_defaults

        if json_args is None:
            json_args = {}
        self.json_args = json_args

        self.experiment = experiment
        if not benchmark_date:
            timestamp = datetime.datetime.strftime(
                datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"
            )
            benchmark_date, benchmark_time = timestamp.split(" ")
        self.date = benchmark_date
        self.time = benchmark_time

    @staticmethod
    def FromExperiment(
        experiment, benchmark_date=None, benchmark_time=None, json_args=None
    ):
        benchmark_results = BenchmarkResults.FromExperiment(
            experiment, for_json_report=True
        )
        return JSONResultsReport(
            benchmark_results,
            benchmark_date,
            benchmark_time,
            experiment,
            json_args,
        )

    def GetReportObjectIgnoringExperiment(self):
        """Gets the JSON report object specifically for the output data.

        Ignores any experiment-specific fields (e.g. board, machine checksum, ...).
        """
        benchmark_results = self.benchmark_results
        label_names = benchmark_results.label_names
        summary_field_defaults = self.summary_field_defaults
        final_results = []
        for test, test_results in benchmark_results.run_keyvals.items():
            for label_name, label_results in zip(label_names, test_results):
                for iter_results in label_results:
                    passed = iter_results.get("retval") == 0
                    json_results = {
                        "date": self.date,
                        "time": self.time,
                        "label": label_name,
                        "test_name": test,
                        "pass": passed,
                    }
                    final_results.append(json_results)

                    if not passed:
                        continue

                    # Get overall results.
                    summary_fields = summary_field_defaults.get(test)
                    if summary_fields is not None:
                        value = []
                        json_results["overall_result"] = value
                        for f in summary_fields:
                            v = iter_results.get(f)
                            if v is None:
                                continue
                            # New telemetry results format: sometimes we get a list of lists
                            # now.
                            v = _Unlist(_Unlist(v))
                            value.append((f, float(v)))

                    # Get detailed results.
                    detail_results = {}
                    json_results["detailed_results"] = detail_results
                    for k, v in iter_results.items():
                        if (
                            k == "retval"
                            or k == "PASS"
                            or k == ["PASS"]
                            or v == "PASS"
                        ):
                            continue

                        v = _Unlist(v)
                        if "machine" in k:
                            json_results[k] = v
                        elif v is not None:
                            if isinstance(v, list):
                                detail_results[k] = [float(d) for d in v]
                            else:
                                detail_results[k] = float(v)
        return final_results

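    # Shape of one record produced by GetReportObjectIgnoringExperiment()
    # (illustrative; the field values and metric names below are invented and
    # depend on the benchmark and the configured Telemetry summary fields):
    #   {"date": "2022-01-01", "time": "00:00:00", "label": "vanilla",
    #    "test_name": "octane", "pass": True,
    #    "overall_result": [("Total.Score", 12345.0)],
    #    "detailed_results": {"Richards.Score": 678.0}}
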
    def GetReportObject(self):
        """Generate the JSON report, returning it as a python object."""
        report_list = self.GetReportObjectIgnoringExperiment()
        if self.experiment is not None:
            self._AddExperimentSpecificFields(report_list)
        return report_list

    def _AddExperimentSpecificFields(self, report_list):
        """Add experiment-specific data to the JSON report."""
        board = self.experiment.labels[0].board
        manager = self.experiment.machine_manager
        for report in report_list:
            label_name = report["label"]
            label = _GetElemByName(label_name, self.experiment.labels)

            img_path = os.path.realpath(
                os.path.expanduser(label.chromeos_image)
            )
            ver, img = ParseChromeosImage(img_path)

            report.update(
                {
                    "board": board,
                    "chromeos_image": img,
                    "chromeos_version": ver,
                    "chrome_version": label.chrome_version,
                    "compiler": label.compiler,
                }
            )

            if not report["pass"]:
                continue
            if "machine_checksum" not in report:
                report["machine_checksum"] = manager.machine_checksum[
                    label_name
                ]
            if "machine_string" not in report:
                report["machine_string"] = manager.machine_checksum_string[
                    label_name
                ]

    def GetReport(self):
        """Dump the results of self.GetReportObject() to a string as JSON."""
        # This exists for consistency with the other GetReport methods.
        # Specifically, they all return strings, so it's a bit awkward if the JSON
        # results reporter returns an object.
        return json.dumps(self.GetReportObject(), **self.json_args)
918