# -*- coding: utf-8 -*-
# Copyright 2013 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A module to handle the report format."""

import datetime
import functools
import itertools
import json
import os
import re
import time

from column_chart import ColumnChart
from cros_utils.tabulator import AmeanResult
from cros_utils.tabulator import Cell
from cros_utils.tabulator import CoeffVarFormat
from cros_utils.tabulator import CoeffVarResult
from cros_utils.tabulator import Column
from cros_utils.tabulator import Format
from cros_utils.tabulator import GmeanRatioResult
from cros_utils.tabulator import IterationResult
from cros_utils.tabulator import LiteralResult
from cros_utils.tabulator import MaxResult
from cros_utils.tabulator import MinResult
from cros_utils.tabulator import PValueFormat
from cros_utils.tabulator import PValueResult
from cros_utils.tabulator import RatioFormat
from cros_utils.tabulator import RawResult
from cros_utils.tabulator import SamplesTableGenerator
from cros_utils.tabulator import StdResult
from cros_utils.tabulator import TableFormatter
from cros_utils.tabulator import TableGenerator
from cros_utils.tabulator import TablePrinter
from results_organizer import OrganizeResults
import results_report_templates as templates
from update_telemetry_defaults import TelemetryDefaults


def ParseChromeosImage(chromeos_image):
    """Parse the chromeos_image string for the image and version.

    The chromeos_image string will probably be in one of two formats:
    1: <path-to-chroot>/src/build/images/<board>/<ChromeOS-version>.<datetime>/ \
        chromiumos_test_image.bin
    2: <path-to-chroot>/chroot/tmp/<buildbot-build>/<ChromeOS-version>/ \
        chromiumos_test_image.bin

    We parse these strings to find the 'chromeos_version' to store in the
    json archive (without the .datetime bit in the first case); and also
    the 'chromeos_image', which would be all of the first case, but only the
    part after '/chroot/tmp' in the second case.

    Args:
        chromeos_image: string containing the path to the chromeos_image that
            crosperf used for the test.

    Returns:
        version, image: The results of parsing the input string, as explained
        above.
    """
    # Find the ChromeOS version, e.g. R45-2345.0.0...
    # chromeos_image should have been something like:
    # <path>/<board-trybot-release>/<chromeos-version>/chromiumos_test_image.bin
    if chromeos_image.endswith("/chromiumos_test_image.bin"):
        full_version = chromeos_image.split("/")[-2]
        # Strip the date and time off of local builds (which have the format
        # "R43-2345.0.0.date-and-time").
        version, _ = os.path.splitext(full_version)
    else:
        version = ""

    # Find the chromeos image. If it's somewhere in .../chroot/tmp/..., then
    # it's an official image that got downloaded, so chop off the download path
    # to make the official image name more clear.
    official_image_path = "/chroot/tmp"
    if official_image_path in chromeos_image:
        image = chromeos_image.split(official_image_path, 1)[1]
    else:
        image = chromeos_image
    return version, image
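
# Illustrative sketch (not part of the original module; the path below is
# hypothetical). For a local build, the date/time suffix is dropped from the
# version and the image path is returned unchanged:
#
#   ParseChromeosImage(
#       "/home/user/cros/src/build/images/kevin/"
#       "R45-2345.0.0.2015_06_01_1200/chromiumos_test_image.bin")
#   # -> ("R45-2345.0.0", <the full input path>)
#
# For an official image that lives under .../chroot/tmp/..., the returned
# image is only the part of the path after "/chroot/tmp".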


def _AppendUntilLengthIs(gen, the_list, target_len):
    """Appends to `the_list` until it is `target_len` elements long.

    Uses `gen` to generate elements.
    """
    the_list.extend(gen() for _ in range(target_len - len(the_list)))
    return the_list


def _FilterPerfReport(event_threshold, report):
    """Filters out entries with `< event_threshold` percent in a perf report."""

    def filter_dict(m):
        return {
            fn_name: pct for fn_name, pct in m.items() if pct >= event_threshold
        }

    return {event: filter_dict(m) for event, m in report.items()}


class _PerfTable(object):
    """Generates dicts from a perf table.

    Dicts look like:
    {'benchmark_name': {'perf_event_name': [LabelData]}}
    where LabelData is a list of perf dicts, each perf dict coming from the same
    label.
    Each perf dict looks like {'function_name': 0.10, ...} (where 0.10 is the
    percentage of time spent in function_name).
    """

    def __init__(
        self,
        benchmark_names_and_iterations,
        label_names,
        read_perf_report,
        event_threshold=None,
    ):
        """Constructor.

        read_perf_report is a function that takes a label name, benchmark name,
        and benchmark iteration, and returns a dictionary describing the perf
        output for that given run.
        """
        self.event_threshold = event_threshold
        self._label_indices = {name: i for i, name in enumerate(label_names)}
        self.perf_data = {}
        for label in label_names:
            for bench_name, bench_iterations in benchmark_names_and_iterations:
                for i in range(bench_iterations):
                    report = read_perf_report(label, bench_name, i)
                    self._ProcessPerfReport(report, label, bench_name, i)

    def _ProcessPerfReport(self, perf_report, label, benchmark_name, iteration):
        """Add the data from one run to the dict."""
        perf_of_run = perf_report
        if self.event_threshold is not None:
            perf_of_run = _FilterPerfReport(self.event_threshold, perf_report)
        if benchmark_name not in self.perf_data:
            self.perf_data[benchmark_name] = {
                event: [] for event in perf_of_run
            }
        ben_data = self.perf_data[benchmark_name]
        label_index = self._label_indices[label]
        for event in ben_data:
            _AppendUntilLengthIs(list, ben_data[event], label_index + 1)
            data_for_label = ben_data[event][label_index]
            _AppendUntilLengthIs(dict, data_for_label, iteration + 1)
            data_for_label[iteration] = (
                perf_of_run[event] if perf_of_run else {}
            )
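
# A minimal sketch of the resulting shape (hypothetical labels, benchmark,
# event and percentages): with labels ["base", "test"] and one benchmark
# "octane" run twice, _PerfTable(...).perf_data could look like
#
#   {"octane": {"cycles": [  # one entry per label, in label order
#       [{"_malloc": 10.0, "_free": 0.3}, {"_malloc": 9.8, "_free": 0.4}],
#       [{"_malloc": 10.1, "_free": 0.2}, {"_malloc": 9.9, "_free": 0.3}],
#   ]}}
#
# i.e. perf_data[benchmark][event][label_index][iteration] is one perf dict.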


def _GetResultsTableHeader(ben_name, iterations):
    benchmark_info = "Benchmark: {0}; Iterations: {1}".format(
        ben_name, iterations
    )
    cell = Cell()
    cell.string_value = benchmark_info
    cell.header = True
    return [[cell]]


def _GetDSOHeader(cwp_dso):
    info = "CWP_DSO: %s" % cwp_dso
    cell = Cell()
    cell.string_value = info
    cell.header = False
    return [[cell]]


def _ParseColumn(columns, iteration):
    new_column = []
    for column in columns:
        if column.result.__class__.__name__ != "RawResult":
            new_column.append(column)
        else:
            new_column.extend(
                Column(LiteralResult(i), Format(), str(i + 1))
                for i in range(iteration)
            )
    return new_column


def _GetTables(benchmark_results, columns, table_type):
    iter_counts = benchmark_results.iter_counts
    result = benchmark_results.run_keyvals
    tables = []
    for bench_name, runs in result.items():
        iterations = iter_counts[bench_name]
        ben_table = _GetResultsTableHeader(bench_name, iterations)

        all_runs_empty = all(not dict for label in runs for dict in label)
        if all_runs_empty:
            cell = Cell()
            cell.string_value = (
                "This benchmark contains no result."
                " Is the benchmark name valid?"
            )
            cell_table = [[cell]]
        else:
            table = TableGenerator(
                runs, benchmark_results.label_names
            ).GetTable()
            parsed_columns = _ParseColumn(columns, iterations)
            tf = TableFormatter(table, parsed_columns)
            cell_table = tf.GetCellTable(table_type)
        tables.append(ben_table)
        tables.append(cell_table)
    return tables
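
# Rough sketch (illustrative only) of how _GetTables expands the raw column via
# the _ParseColumn helper above: a Column wrapping RawResult becomes one
# LiteralResult column per iteration, so with iterations=3
#
#   _ParseColumn([Column(RawResult(), Format())], 3)
#   # -> columns headed "1", "2" and "3", one per iteration;
#   #    all other columns are passed through unchanged.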


def _GetPerfTables(benchmark_results, columns, table_type):
    p_table = _PerfTable(
        benchmark_results.benchmark_names_and_iterations,
        benchmark_results.label_names,
        benchmark_results.read_perf_report,
    )

    tables = []
    for benchmark in p_table.perf_data:
        iterations = benchmark_results.iter_counts[benchmark]
        ben_table = _GetResultsTableHeader(benchmark, iterations)
        tables.append(ben_table)
        benchmark_data = p_table.perf_data[benchmark]
        table = []
        for event in benchmark_data:
            tg = TableGenerator(
                benchmark_data[event],
                benchmark_results.label_names,
                sort=TableGenerator.SORT_BY_VALUES_DESC,
            )
            table = tg.GetTable(ResultsReport.PERF_ROWS)
            parsed_columns = _ParseColumn(columns, iterations)
            tf = TableFormatter(table, parsed_columns)
            tf.GenerateCellTable(table_type)
            tf.AddColumnName()
            tf.AddLabelName()
            tf.AddHeader(str(event))
            table = tf.GetCellTable(table_type, headers=False)
            tables.append(table)
    return tables


def _GetSamplesTables(benchmark_results, columns, table_type):
    tables = []
    dso_header_table = _GetDSOHeader(benchmark_results.cwp_dso)
    tables.append(dso_header_table)
    (table, new_keyvals, iter_counts) = SamplesTableGenerator(
        benchmark_results.run_keyvals,
        benchmark_results.label_names,
        benchmark_results.iter_counts,
        benchmark_results.weights,
    ).GetTable()
    parsed_columns = _ParseColumn(columns, 1)
    tf = TableFormatter(table, parsed_columns, samples_table=True)
    cell_table = tf.GetCellTable(table_type)
    tables.append(cell_table)
    return (tables, new_keyvals, iter_counts)


class ResultsReport(object):
    """Class to handle the report format."""

    MAX_COLOR_CODE = 255
    PERF_ROWS = 5

    def __init__(self, results):
        self.benchmark_results = results

    def _GetTablesWithColumns(self, columns, table_type, summary_type):
        if summary_type == "perf":
            get_tables = _GetPerfTables
        elif summary_type == "samples":
            get_tables = _GetSamplesTables
        else:
            get_tables = _GetTables
        ret = get_tables(self.benchmark_results, columns, table_type)
        # If we are generating a samples summary table, the return value of
        # get_tables will be a tuple, and we update the benchmark_results for
        # the composite benchmark so that the full table can use it.
        if isinstance(ret, tuple):
            self.benchmark_results.run_keyvals = ret[1]
            self.benchmark_results.iter_counts = ret[2]
            ret = ret[0]
        return ret

    def GetFullTables(self, perf=False):
        ignore_min_max = self.benchmark_results.ignore_min_max
        columns = [
            Column(RawResult(), Format()),
            Column(MinResult(), Format()),
            Column(MaxResult(), Format()),
            Column(AmeanResult(ignore_min_max), Format()),
            Column(StdResult(ignore_min_max), Format(), "StdDev"),
            Column(
                CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
            ),
            Column(
                GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
            ),
            Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
        ]
        return self._GetTablesWithColumns(columns, "full", perf)

    def GetSummaryTables(self, summary_type=""):
        ignore_min_max = self.benchmark_results.ignore_min_max
        columns = []
        if summary_type == "samples":
            columns += [
                Column(IterationResult(), Format(), "Iterations [Pass:Fail]")
            ]
        columns += [
            Column(
                AmeanResult(ignore_min_max),
                Format(),
                "Weighted Samples Amean" if summary_type == "samples" else "",
            ),
            Column(StdResult(ignore_min_max), Format(), "StdDev"),
            Column(
                CoeffVarResult(ignore_min_max), CoeffVarFormat(), "StdDev/Mean"
            ),
            Column(
                GmeanRatioResult(ignore_min_max), RatioFormat(), "GmeanSpeedup"
            ),
            Column(PValueResult(ignore_min_max), PValueFormat(), "p-value"),
        ]
        return self._GetTablesWithColumns(columns, "summary", summary_type)


def _PrintTable(tables, out_to):
    # tables may be None.
    if not tables:
        return ""

    if out_to == "HTML":
        out_type = TablePrinter.HTML
    elif out_to == "PLAIN":
        out_type = TablePrinter.PLAIN
    elif out_to == "CONSOLE":
        out_type = TablePrinter.CONSOLE
    elif out_to == "TSV":
        out_type = TablePrinter.TSV
    elif out_to == "EMAIL":
        out_type = TablePrinter.EMAIL
    else:
        raise ValueError("Invalid out_to value: %s" % (out_to,))

    printers = (TablePrinter(table, out_type) for table in tables)
    return "".join(printer.Print() for printer in printers)
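
# A minimal usage sketch (assumes `results` is a BenchmarkResults instance,
# defined later in this module; names here are illustrative only):
#
#   report = ResultsReport(results)
#   text = _PrintTable(report.GetSummaryTables(), "CONSOLE")
#   html = _PrintTable(report.GetFullTables(), "HTML")
#
# The TextResultsReport and HTMLResultsReport subclasses below wrap this kind
# of call sequence inside their GetReport() methods.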


class TextResultsReport(ResultsReport):
    """Class to generate text result report."""

    H1_STR = "==========================================="
    H2_STR = "-------------------------------------------"

    def __init__(self, results, email=False, experiment=None):
        super(TextResultsReport, self).__init__(results)
        self.email = email
        self.experiment = experiment

    @staticmethod
    def _MakeTitle(title):
        header_line = TextResultsReport.H1_STR
        # '' at the end gives one newline.
        return "\n".join([header_line, title, header_line, ""])

    @staticmethod
    def _MakeSection(title, body):
        header_line = TextResultsReport.H2_STR
        # '\n' at the end gives us two newlines.
        return "\n".join([header_line, title, header_line, body, "\n"])

    @staticmethod
    def FromExperiment(experiment, email=False):
        results = BenchmarkResults.FromExperiment(experiment)
        return TextResultsReport(results, email, experiment)

    def GetStatusTable(self):
        """Generate the status table by the tabulator."""
        table = [["", ""]]
        columns = [
            Column(LiteralResult(iteration=0), Format(), "Status"),
            Column(LiteralResult(iteration=1), Format(), "Failing Reason"),
        ]

        for benchmark_run in self.experiment.benchmark_runs:
            status = [
                benchmark_run.name,
                [
                    benchmark_run.timeline.GetLastEvent(),
                    benchmark_run.failure_reason,
                ],
            ]
            table.append(status)
        cell_table = TableFormatter(table, columns).GetCellTable("status")
        return [cell_table]

    def GetTotalWaitCooldownTime(self):
        """Get cooldown wait time in seconds from experiment benchmark runs.

        Returns:
            Dictionary {'dut': int(wait_time_in_seconds)}
        """
        waittime_dict = {}
        for dut in self.experiment.machine_manager.GetMachines():
            waittime_dict[dut.name] = dut.GetCooldownWaitTime()
        return waittime_dict

    def GetReport(self):
        """Generate the report for email and console."""
        output_type = "EMAIL" if self.email else "CONSOLE"
        experiment = self.experiment

        sections = []
        if experiment is not None:
            title_contents = "Results report for '%s'" % (experiment.name,)
        else:
            title_contents = "Results report"
        sections.append(self._MakeTitle(title_contents))

        if not self.benchmark_results.cwp_dso:
            summary_table = _PrintTable(self.GetSummaryTables(), output_type)
        else:
            summary_table = _PrintTable(
                self.GetSummaryTables(summary_type="samples"), output_type
            )
        sections.append(self._MakeSection("Summary", summary_table))

        if experiment is not None:
            table = _PrintTable(self.GetStatusTable(), output_type)
            sections.append(self._MakeSection("Benchmark Run Status", table))

        if not self.benchmark_results.cwp_dso:
            perf_table = _PrintTable(
                self.GetSummaryTables(summary_type="perf"), output_type
            )
            sections.append(self._MakeSection("Perf Data", perf_table))

        if experiment is not None:
            experiment_file = experiment.experiment_file
            sections.append(
                self._MakeSection("Experiment File", experiment_file)
            )

            cpu_info = experiment.machine_manager.GetAllCPUInfo(
                experiment.labels
            )
            sections.append(self._MakeSection("CPUInfo", cpu_info))

            totaltime = (
                (time.time() - experiment.start_time)
                if experiment.start_time
                else 0
            )
            totaltime_str = "Total experiment time:\n%d min" % (totaltime // 60)
            cooldown_waittime_list = ["Cooldown wait time:"]
            # When running an experiment on multiple DUTs, the cooldown wait
            # time may vary between devices. In addition, the combined cooldown
            # time may exceed the total experiment time, which looks odd but is
            # reasonable. For this reason, print the cooldown time per DUT.
            for dut, waittime in sorted(
                self.GetTotalWaitCooldownTime().items()
            ):
                cooldown_waittime_list.append(
                    "DUT %s: %d min" % (dut, waittime // 60)
                )
            cooldown_waittime_str = "\n".join(cooldown_waittime_list)
            sections.append(
                self._MakeSection(
                    "Duration",
                    "\n\n".join([totaltime_str, cooldown_waittime_str]),
                )
            )

        return "\n".join(sections)


def _GetHTMLCharts(label_names, test_results):
    charts = []
    for item, runs in test_results.items():
        # Fun fact: label_names is actually *entirely* useless as a param,
        # since we never add headers. We still need to pass it anyway.
        table = TableGenerator(runs, label_names).GetTable()
        columns = [
            Column(AmeanResult(), Format()),
            Column(MinResult(), Format()),
            Column(MaxResult(), Format()),
        ]
        tf = TableFormatter(table, columns)
        data_table = tf.GetCellTable("full", headers=False)

        for cur_row_data in data_table:
            test_key = cur_row_data[0].string_value
            title = "{0}: {1}".format(item, test_key.replace("/", ""))
            chart = ColumnChart(title, 300, 200)
            chart.AddColumn("Label", "string")
            chart.AddColumn("Average", "number")
            chart.AddColumn("Min", "number")
            chart.AddColumn("Max", "number")
            chart.AddSeries("Min", "line", "black")
            chart.AddSeries("Max", "line", "black")
            cur_index = 1
            for label in label_names:
                chart.AddRow(
                    [
                        label,
                        cur_row_data[cur_index].value,
                        cur_row_data[cur_index + 1].value,
                        cur_row_data[cur_index + 2].value,
                    ]
                )
                if isinstance(cur_row_data[cur_index].value, str):
                    chart = None
                    break
                cur_index += 3
            if chart:
                charts.append(chart)
    return charts


class HTMLResultsReport(ResultsReport):
    """Class to generate html result report."""

    def __init__(self, benchmark_results, experiment=None):
        super(HTMLResultsReport, self).__init__(benchmark_results)
        self.experiment = experiment

    @staticmethod
    def FromExperiment(experiment):
        return HTMLResultsReport(
            BenchmarkResults.FromExperiment(experiment), experiment=experiment
        )

    def GetReport(self):
        label_names = self.benchmark_results.label_names
        test_results = self.benchmark_results.run_keyvals
        charts = _GetHTMLCharts(label_names, test_results)
        chart_javascript = "".join(chart.GetJavascript() for chart in charts)
        chart_divs = "".join(chart.GetDiv() for chart in charts)

        if not self.benchmark_results.cwp_dso:
            summary_table = self.GetSummaryTables()
            perf_table = self.GetSummaryTables(summary_type="perf")
        else:
            summary_table = self.GetSummaryTables(summary_type="samples")
            perf_table = None
        full_table = self.GetFullTables()

        experiment_file = ""
        if self.experiment is not None:
            experiment_file = self.experiment.experiment_file
        # Use kwargs for code readability, and so that testing is a bit easier.
        return templates.GenerateHTMLPage(
            perf_table=perf_table,
            chart_js=chart_javascript,
            summary_table=summary_table,
            print_table=_PrintTable,
            chart_divs=chart_divs,
            full_table=full_table,
            experiment_file=experiment_file,
        )


def ParseStandardPerfReport(report_data):
    """Parses the output of `perf report`.

    It'll parse the following:
    {{garbage}}
    # Samples: 1234M of event 'foo'

    1.23% command shared_object location function::name

    1.22% command shared_object location function2::name

    # Samples: 999K of event 'bar'

    0.23% command shared_object location function3::name
    {{etc.}}

    Into:
    {'foo': {'function::name': 1.23, 'function2::name': 1.22},
     'bar': {'function3::name': 0.23, etc.}}
    """
    # This function fails silently if it's handed a string (as opposed to a
    # list of lines). So, auto-split if we do happen to get a string.
    if isinstance(report_data, str):
        report_data = report_data.splitlines()
    # When switching to python3 catch the case when bytes are passed.
    elif isinstance(report_data, bytes):
        raise TypeError()

    # Samples: N{K,M,G} of event 'event-name'
    samples_regex = re.compile(r"#\s+Samples: \d+\S? of event '([^']+)'")

    # We expect lines like:
    # N.NN% command samples shared_object [location] symbol
    #
    # Note that we're looking at stripped lines, so there is no space at the
    # start.
    perf_regex = re.compile(
        r"^(\d+(?:\.\d*)?)%"  # N.NN%
        r"\s*\d+"  # samples count (ignored)
        r"\s*\S+"  # command (ignored)
        r"\s*\S+"  # shared_object (ignored)
        r"\s*\[.\]"  # location (ignored)
        r"\s*(\S.+)"  # function
    )

    stripped_lines = (l.strip() for l in report_data)
    nonempty_lines = (l for l in stripped_lines if l)
    # Ignore all lines before we see samples_regex.
    interesting_lines = itertools.dropwhile(
        lambda x: not samples_regex.match(x), nonempty_lines
    )

    first_sample_line = next(interesting_lines, None)
    # Went through the entire file without finding a 'samples' header. Quit.
    if first_sample_line is None:
        return {}

    sample_name = samples_regex.match(first_sample_line).group(1)
    current_result = {}
    results = {sample_name: current_result}
    for line in interesting_lines:
        samples_match = samples_regex.match(line)
        if samples_match:
            sample_name = samples_match.group(1)
            current_result = {}
            results[sample_name] = current_result
            continue

        match = perf_regex.match(line)
        if not match:
            continue
        percentage_str, func_name = match.groups()
        try:
            percentage = float(percentage_str)
        except ValueError:
            # Couldn't parse it; try to be "resilient".
            continue
        current_result[func_name] = percentage
    return results


def _ReadExperimentPerfReport(
    results_directory, label_name, benchmark_name, benchmark_iteration
):
    """Reads a perf report for the given benchmark. Returns {} on failure.

    The result should be a map of maps; it should look like:
    {perf_event_name: {function_name: pct_time_spent}}, e.g.
    {'cpu_cycles': {'_malloc': 10.0, '_free': 0.3, ...}}
    """
    raw_dir_name = label_name + benchmark_name + str(benchmark_iteration + 1)
    dir_name = "".join(c for c in raw_dir_name if c.isalnum())
    file_name = os.path.join(results_directory, dir_name, "perf.data.report.0")
    try:
        with open(file_name) as in_file:
            return ParseStandardPerfReport(in_file)
    except IOError:
        # Yes, we swallow any IO-related errors.
        return {}
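
# Illustrative sketch of the directory naming above (hypothetical values):
#
#   _ReadExperimentPerfReport("/tmp/results", "image1", "octane", 0)
#   # reads /tmp/results/image1octane1/perf.data.report.0
#
# i.e. the directory name is label + benchmark + 1-based iteration, with any
# non-alphanumeric characters stripped, and {} is returned if the file is
# missing.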


# Split out so that testing (specifically: mocking) is easier.
def _ExperimentToKeyvals(experiment, for_json_report):
    """Converts an experiment to keyvals."""
    return OrganizeResults(
        experiment.benchmark_runs,
        experiment.labels,
        json_report=for_json_report,
    )


class BenchmarkResults(object):
    """The minimum set of fields that any ResultsReport will take."""

    def __init__(
        self,
        label_names,
        benchmark_names_and_iterations,
        run_keyvals,
        ignore_min_max=False,
        read_perf_report=None,
        cwp_dso=None,
        weights=None,
    ):
        if read_perf_report is None:

            def _NoPerfReport(*_args, **_kwargs):
                return {}

            read_perf_report = _NoPerfReport

        self.label_names = label_names
        self.benchmark_names_and_iterations = benchmark_names_and_iterations
        self.iter_counts = dict(benchmark_names_and_iterations)
        self.run_keyvals = run_keyvals
        self.ignore_min_max = ignore_min_max
        self.read_perf_report = read_perf_report
        self.cwp_dso = cwp_dso
        self.weights = dict(weights) if weights else None

    @staticmethod
    def FromExperiment(experiment, for_json_report=False):
        label_names = [label.name for label in experiment.labels]
        benchmark_names_and_iterations = [
            (benchmark.name, benchmark.iterations)
            for benchmark in experiment.benchmarks
        ]
        run_keyvals = _ExperimentToKeyvals(experiment, for_json_report)
        ignore_min_max = experiment.ignore_min_max
        read_perf_report = functools.partial(
            _ReadExperimentPerfReport, experiment.results_directory
        )
        cwp_dso = experiment.cwp_dso
        weights = [
            (benchmark.name, benchmark.weight)
            for benchmark in experiment.benchmarks
        ]
        return BenchmarkResults(
            label_names,
            benchmark_names_and_iterations,
            run_keyvals,
            ignore_min_max,
            read_perf_report,
            cwp_dso,
            weights,
        )
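
# A minimal end-to-end sketch (keyvals here are assumed, illustrative values,
# not part of the module): building a BenchmarkResults by hand and rendering a
# console summary with the classes defined above.
#
#   results = BenchmarkResults(
#       label_names=["base", "test"],
#       benchmark_names_and_iterations=[("octane", 2)],
#       run_keyvals={
#           "octane": [
#               [{"retval": 0, "Total": 21000.0}, {"retval": 0, "Total": 21500.0}],
#               [{"retval": 0, "Total": 22000.0}, {"retval": 0, "Total": 21900.0}],
#           ]
#       },
#   )
#   print(_PrintTable(ResultsReport(results).GetSummaryTables(), "CONSOLE"))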


def _GetElemByName(name, from_list):
    """Gets an element from the given list by its name field.

    Raises an error if it doesn't find exactly one match.
    """
    elems = [e for e in from_list if e.name == name]
    if len(elems) != 1:
        raise ValueError(
            "Expected 1 item named %s, found %d" % (name, len(elems))
        )
    return elems[0]


def _Unlist(l):
    """If l is a list, extracts the first element of l. Otherwise, returns l."""
    return l[0] if isinstance(l, list) else l


class JSONResultsReport(ResultsReport):
    """Class that generates JSON reports for experiments."""

    def __init__(
        self,
        benchmark_results,
        benchmark_date=None,
        benchmark_time=None,
        experiment=None,
        json_args=None,
    ):
        """Construct a JSONResultsReport.

        json_args is the dict of arguments we pass to json.dumps in GetReport().
        """
        super(JSONResultsReport, self).__init__(benchmark_results)

        defaults = TelemetryDefaults()
        defaults.ReadDefaultsFile()
        summary_field_defaults = defaults.GetDefault()
        if summary_field_defaults is None:
            summary_field_defaults = {}
        self.summary_field_defaults = summary_field_defaults

        if json_args is None:
            json_args = {}
        self.json_args = json_args

        self.experiment = experiment
        if not benchmark_date:
            timestamp = datetime.datetime.strftime(
                datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"
            )
            benchmark_date, benchmark_time = timestamp.split(" ")
        self.date = benchmark_date
        self.time = benchmark_time

    @staticmethod
    def FromExperiment(
        experiment, benchmark_date=None, benchmark_time=None, json_args=None
    ):
        benchmark_results = BenchmarkResults.FromExperiment(
            experiment, for_json_report=True
        )
        return JSONResultsReport(
            benchmark_results,
            benchmark_date,
            benchmark_time,
            experiment,
            json_args,
        )

    def GetReportObjectIgnoringExperiment(self):
        """Gets the JSON report object specifically for the output data.

        Ignores any experiment-specific fields (e.g. board, machine checksum,
        ...).
        """
        benchmark_results = self.benchmark_results
        label_names = benchmark_results.label_names
        summary_field_defaults = self.summary_field_defaults
        final_results = []
        for test, test_results in benchmark_results.run_keyvals.items():
            for label_name, label_results in zip(label_names, test_results):
                for iter_results in label_results:
                    passed = iter_results.get("retval") == 0
                    json_results = {
                        "date": self.date,
                        "time": self.time,
                        "label": label_name,
                        "test_name": test,
                        "pass": passed,
                    }
                    final_results.append(json_results)

                    if not passed:
                        continue

                    # Get overall results.
                    summary_fields = summary_field_defaults.get(test)
                    if summary_fields is not None:
                        value = []
                        json_results["overall_result"] = value
                        for f in summary_fields:
                            v = iter_results.get(f)
                            if v is None:
                                continue
                            # New telemetry results format: sometimes we get a
                            # list of lists now.
                            v = _Unlist(_Unlist(v))
                            value.append((f, float(v)))

                    # Get detailed results.
                    detail_results = {}
                    json_results["detailed_results"] = detail_results
                    for k, v in iter_results.items():
                        if (
                            k == "retval"
                            or k == "PASS"
                            or k == ["PASS"]
                            or v == "PASS"
                        ):
                            continue

                        v = _Unlist(v)
                        if "machine" in k:
                            json_results[k] = v
                        elif v is not None:
                            if isinstance(v, list):
                                detail_results[k] = [float(d) for d in v]
                            else:
                                detail_results[k] = float(v)
        return final_results

    def GetReportObject(self):
        """Generate the JSON report, returning it as a python object."""
        report_list = self.GetReportObjectIgnoringExperiment()
        if self.experiment is not None:
            self._AddExperimentSpecificFields(report_list)
        return report_list

    def _AddExperimentSpecificFields(self, report_list):
        """Add experiment-specific data to the JSON report."""
        board = self.experiment.labels[0].board
        manager = self.experiment.machine_manager
        for report in report_list:
            label_name = report["label"]
            label = _GetElemByName(label_name, self.experiment.labels)

            img_path = os.path.realpath(
                os.path.expanduser(label.chromeos_image)
            )
            ver, img = ParseChromeosImage(img_path)

            report.update(
                {
                    "board": board,
                    "chromeos_image": img,
                    "chromeos_version": ver,
                    "chrome_version": label.chrome_version,
                    "compiler": label.compiler,
                }
            )

            if not report["pass"]:
                continue
            if "machine_checksum" not in report:
                report["machine_checksum"] = manager.machine_checksum[
                    label_name
                ]
            if "machine_string" not in report:
                report["machine_string"] = manager.machine_checksum_string[
                    label_name
                ]

    def GetReport(self):
        """Dump the results of self.GetReportObject() to a string as JSON."""
        # This exists for consistency with the other GetReport methods.
        # Specifically, they all return strings, so it's a bit awkward if the
        # JSON results reporter returns an object.
        return json.dumps(self.GetReportObject(), **self.json_args)
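
# A minimal usage sketch (assumes `experiment` is a crosperf Experiment object;
# the json_args value shown is illustrative):
#
#   report = JSONResultsReport.FromExperiment(experiment, json_args={"indent": 2})
#   print(report.GetReport())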