#!/usr/bin/env vpython
#
# Copyright 2021 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# process_angle_perf_results.py:
#   Perf result merging and upload. Adapted from the Chromium script:
#   https://chromium.googlesource.com/chromium/src/+/main/tools/perf/process_perf_results.py

from __future__ import print_function

import argparse
import collections
import datetime
import json
import logging
import multiprocessing
import os
import pathlib
import shutil
import subprocess
import sys
import tempfile
import time
import uuid

logging.basicConfig(
    level=logging.INFO,
    format='(%(levelname)s) %(asctime)s pid=%(process)d'
    '  %(module)s.%(funcName)s:%(lineno)d  %(message)s')

PY_UTILS = str(pathlib.Path(__file__).resolve().parents[1] / 'src' / 'tests' / 'py_utils')
if PY_UTILS not in sys.path:
    os.stat(PY_UTILS) and sys.path.insert(0, PY_UTILS)
import angle_metrics
import angle_path_util

angle_path_util.AddDepsDirToPath('tools/perf')
from core import path_util

path_util.AddTelemetryToPath()
from core import upload_results_to_perf_dashboard
from core import results_merger

path_util.AddAndroidPylibToPath()
try:
    from pylib.utils import logdog_helper
except ImportError:
    pass

path_util.AddTracingToPath()
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos

RESULTS_URL = 'https://chromeperf.appspot.com'
JSON_CONTENT_TYPE = 'application/json'
MACHINE_GROUP = 'ANGLE'
BUILD_URL = 'https://ci.chromium.org/ui/p/angle/builders/ci/%s/%d'

GSUTIL_PY_PATH = str(
    pathlib.Path(__file__).resolve().parents[1] / 'third_party' / 'depot_tools' / 'gsutil.py')


def _upload_perf_results(json_to_upload, name, configuration_name, build_properties,
                         output_json_file):
    """Upload the contents of result JSON(s) to the perf dashboard."""
    args = [
        '--buildername',
        build_properties['buildername'],
        '--buildnumber',
        str(build_properties['buildnumber']),
        '--name',
        name,
        '--configuration-name',
        configuration_name,
        '--results-file',
        json_to_upload,
        '--results-url',
        RESULTS_URL,
        '--output-json-file',
        output_json_file,
        '--perf-dashboard-machine-group',
        MACHINE_GROUP,
        '--got-angle-revision',
        build_properties['got_angle_revision'],
        '--send-as-histograms',
        '--project',
        'angle',
    ]

    if build_properties.get('git_revision'):
        args.append('--git-revision')
        args.append(build_properties['git_revision'])

    # TODO(crbug.com/1072729): log this in top level
    logging.info('upload_results_to_perf_dashboard: %s.' % args)

    return upload_results_to_perf_dashboard.main(args)


def _merge_json_output(output_json, jsons_to_merge, extra_links, test_cross_device=False):
    """Merges the contents of one or more results JSONs.

  Args:
    output_json: A path to a JSON file to which the merged results should be
      written.
    jsons_to_merge: A list of JSON files that should be merged.
    extra_links: a (key, value) map in which each key is a human-readable
      string describing the data and each value is a logdog URL that contains
      the data.
  """
    begin_time = time.time()
    merged_results = results_merger.merge_test_results(jsons_to_merge, test_cross_device)

    # Only append the perf results links if present
    if extra_links:
        merged_results['links'] = extra_links

    with open(output_json, 'w') as f:
        json.dump(merged_results, f)

    end_time = time.time()
    print_duration('Merging json test results', begin_time, end_time)
    return 0


def _handle_perf_json_test_results(benchmark_directory_map, test_results_list):
    """Checks the test_results.json under each folder:

  1. marks the benchmark 'enabled' if test results are found
  2. adds the json content to a list for non-ref builds.
  """
    begin_time = time.time()
    benchmark_enabled_map = {}
    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            # Obtain the test name we are running
            is_ref = '.reference' in benchmark_name
            enabled = True
            try:
                with open(os.path.join(directory, 'test_results.json')) as json_data:
                    json_results = json.load(json_data)
                    if not json_results:
                        # Output is null meaning the test didn't produce any results.
                        # Want to output an error and continue loading the rest of the
                        # test results.
                        logging.warning('No results produced for %s, skipping upload' % directory)
                        continue
                    if json_results.get('version') == 3:
                        # Non-telemetry tests don't have written json results but
                        # if they are executing then they are enabled and will generate
                        # chartjson results.
                        if not bool(json_results.get('tests')):
                            enabled = False
                    if not is_ref:
                        # We don't need to upload reference build data to the
                        # flakiness dashboard since we don't monitor the ref build
                        test_results_list.append(json_results)
            except IOError as e:
                # TODO(crbug.com/936602): Figure out how to surface these errors. Should
                # we have a non-zero exit code if we error out?
                logging.error('Failed to obtain test results for %s: %s', benchmark_name, e)
                continue
            if not enabled:
                # We don't upload disabled benchmarks or tests that are run
                # as a smoke test
                logging.info('Benchmark %s ran no tests on at least one shard' % benchmark_name)
                continue
            benchmark_enabled_map[benchmark_name] = True

    end_time = time.time()
    print_duration('Analyzing perf json test results', begin_time, end_time)
    return benchmark_enabled_map


def _generate_unique_logdog_filename(name_prefix):
    return name_prefix + '_' + str(uuid.uuid4())


def _handle_perf_logs(benchmark_directory_map, extra_links):
    """ Upload benchmark logs to logdog and add a page entry for them. """
    begin_time = time.time()
    benchmark_logs_links = collections.defaultdict(list)

    for benchmark_name, directories in benchmark_directory_map.items():
        for directory in directories:
            benchmark_log_file = os.path.join(directory, 'benchmark_log.txt')
            if os.path.exists(benchmark_log_file):
                with open(benchmark_log_file) as f:
                    uploaded_link = logdog_helper.text(
                        name=_generate_unique_logdog_filename(benchmark_name), data=f.read())
                    benchmark_logs_links[benchmark_name].append(uploaded_link)

    logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Logs')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(benchmark_logs_links, sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    extra_links['Benchmarks logs'] = logdog_stream
    end_time = time.time()
    print_duration('Generating perf log streams', begin_time, end_time)


def _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links):
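    """Uploads the benchmarks shard map JSON to logdog and records the link in extra_links."""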
    begin_time = time.time()
    with open(benchmarks_shard_map_file) as f:
        benchmarks_shard_data = f.read()
        logdog_file_name = _generate_unique_logdog_filename('Benchmarks_Shard_Map')
        logdog_stream = logdog_helper.text(
            logdog_file_name, benchmarks_shard_data, content_type=JSON_CONTENT_TYPE)
        extra_links['Benchmarks shard map'] = logdog_stream
    end_time = time.time()
    print_duration('Generating benchmark shard map stream', begin_time, end_time)


def _get_benchmark_name(directory):
    return os.path.basename(directory).replace(" benchmark", "")


def _scan_output_dir(task_output_dir):
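    """Scans task_output_dir for per-benchmark result directories.

  Returns:
    (benchmark_directory_map, benchmarks_shard_map_file), where the map keys
    benchmark names to the list of directories holding their results, and the
    file is the path to benchmarks_shard_map.json if one was found.
  """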
    benchmark_directory_map = {}
    benchmarks_shard_map_file = None

    directory_list = [
        f for f in os.listdir(task_output_dir)
        if not os.path.isfile(os.path.join(task_output_dir, f))
    ]
    benchmark_directory_list = []
    for directory in directory_list:
        for f in os.listdir(os.path.join(task_output_dir, directory)):
            path = os.path.join(task_output_dir, directory, f)
            if os.path.isdir(path):
                benchmark_directory_list.append(path)
            elif path.endswith('benchmarks_shard_map.json'):
                benchmarks_shard_map_file = path
    # Now create a map of benchmark name to the list of directories
    # the results were written to.
    for directory in benchmark_directory_list:
        benchmark_name = _get_benchmark_name(directory)
        logging.debug('Found benchmark %s directory %s' % (benchmark_name, directory))
        if benchmark_name in benchmark_directory_map.keys():
            benchmark_directory_map[benchmark_name].append(directory)
        else:
            benchmark_directory_map[benchmark_name] = [directory]

    return benchmark_directory_map, benchmarks_shard_map_file


def _upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map, build_properties_map):
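    """Converts angle_metrics.json files to the Skia Perf format and uploads them to GCS."""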
    metric_filenames = []

    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue

        for directory in directories:
            metric_filenames.append(os.path.join(directory, 'angle_metrics.json'))

    assert metric_filenames

    buildername = build_properties_map['buildername']  # e.g. win10-nvidia-gtx1660-perf
    skia_data = {
        'version': 1,
        'git_hash': build_properties_map['got_angle_revision'],
        'key': {
            'buildername': buildername,
        },
        'results': angle_metrics.ConvertToSkiaPerf(metric_filenames),
    }

    skia_perf_dir = tempfile.mkdtemp('skia_perf')
    try:
        local_file = os.path.join(skia_perf_dir, '%s.%s.json' % (buildername, time.time()))
        with open(local_file, 'w') as f:
            json.dump(skia_data, f, indent=2)
        gs_dir = 'gs://angle-perf-skia/angle_perftests/%s/' % (
            datetime.datetime.now().strftime('%Y/%m/%d/%H'))
        upload_cmd = ['vpython3', GSUTIL_PY_PATH, 'cp', local_file, gs_dir]
        logging.info('Skia upload: %s', ' '.join(upload_cmd))
        subprocess.check_call(upload_cmd)
    finally:
        shutil.rmtree(skia_perf_dir)


def process_perf_results(output_json,
                         configuration_name,
                         build_properties,
                         task_output_dir,
                         smoke_test_mode,
                         output_results_dir,
                         lightweight=False,
                         skip_perf=False):
    """Process perf results.

  Consists of merging the json-test-format output, uploading the perf test
  output (histogram), and storing the benchmark logs in logdog.

  Each directory in the task_output_dir represents one benchmark
  that was run. Within this directory, there is a subdirectory with the name
  of the benchmark that was run. In that subdirectory, there is a
  perf_results.json file containing the performance results in histogram
  format and a test_results.json file containing the json test results for the
  benchmark.

  Returns:
    (return_code, benchmark_upload_result_map):
      return_code is 0 if the whole operation is successful, non-zero otherwise.
      benchmark_upload_result_map: a dictionary that describes which benchmarks
        were successfully uploaded.
  """
    handle_perf = not lightweight or not skip_perf
    handle_non_perf = not lightweight or skip_perf
    logging.info('lightweight mode: %r; handle_perf: %r; handle_non_perf: %r' %
                 (lightweight, handle_perf, handle_non_perf))

    begin_time = time.time()
    return_code = 0
    benchmark_upload_result_map = {}

    benchmark_directory_map, benchmarks_shard_map_file = _scan_output_dir(task_output_dir)

    test_results_list = []
    extra_links = {}

    if handle_non_perf:
        # First, upload benchmarks shard map to logdog and add a page
        # entry for it in extra_links.
        if benchmarks_shard_map_file:
            _handle_benchmarks_shard_map(benchmarks_shard_map_file, extra_links)

        # Second, upload all the benchmark logs to logdog and add a page entry for
        # those links in extra_links.
        _handle_perf_logs(benchmark_directory_map, extra_links)

    # Then try to obtain the list of json test results to merge
    # and determine the status of each benchmark.
    benchmark_enabled_map = _handle_perf_json_test_results(benchmark_directory_map,
                                                           test_results_list)

    if not smoke_test_mode and handle_perf:
        build_properties_map = json.loads(build_properties)
        if not configuration_name:
            # We are deprecating perf-id. crbug.com/817823
            configuration_name = build_properties_map['buildername']

        try:
            return_code, benchmark_upload_result_map = _handle_perf_results(
                benchmark_enabled_map, benchmark_directory_map, configuration_name,
                build_properties_map, extra_links, output_results_dir)
        except Exception:
            logging.exception('Error handling perf results jsons')
            return_code = 1

        try:
            _upload_to_skia_perf(benchmark_directory_map, benchmark_enabled_map,
                                 build_properties_map)
        except Exception:
            logging.exception('Error uploading to skia perf')
            return_code = 1

    if handle_non_perf:
        # Finally, merge all test results json, add the extra links and write out to
        # the output location.
        try:
            _merge_json_output(output_json, test_results_list, extra_links)
        except Exception:
            logging.exception('Error handling test results jsons.')

    end_time = time.time()
    print_duration('Total process_perf_results', begin_time, end_time)
    return return_code, benchmark_upload_result_map


def _merge_histogram_results(histogram_lists):
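    """Concatenates per-shard histogram lists into a single flat list."""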
    merged_results = []
    for histogram_list in histogram_lists:
        merged_results += histogram_list

    return merged_results


def _load_histogram_set_from_dict(data):
    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(data)
    return histograms


def _add_build_info(results, benchmark_name, build_properties):
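    """Adds shared diagnostics (builder, revision, build URL, etc.) to every histogram in results."""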
    histograms = _load_histogram_set_from_dict(results)

    common_diagnostics = {
        reserved_infos.MASTERS:
            build_properties['builder_group'],
        reserved_infos.BOTS:
            build_properties['buildername'],
        reserved_infos.POINT_ID:
            build_properties['angle_commit_pos'],
        reserved_infos.BENCHMARKS:
            benchmark_name,
        reserved_infos.ANGLE_REVISIONS:
            build_properties['got_angle_revision'],
        reserved_infos.BUILD_URLS:
            BUILD_URL % (build_properties['buildername'], build_properties['buildnumber']),
    }

    for k, v in common_diagnostics.items():
        histograms.AddSharedDiagnosticToAllHistograms(k.name, generic_set.GenericSet([v]))

    return histograms.AsDicts()


def _merge_perf_results(benchmark_name, results_filename, directories, build_properties):
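    """Merges per-shard perf_results.json histograms and writes them to results_filename."""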
    begin_time = time.time()
    collected_results = []
    for directory in directories:
        filename = os.path.join(directory, 'perf_results.json')
        try:
            with open(filename) as pf:
                collected_results.append(json.load(pf))
        except IOError as e:
            # TODO(crbug.com/936602): Figure out how to surface these errors. Should
            # we have a non-zero exit code if we error out?
            logging.error('Failed to obtain perf results from %s: %s', directory, e)
    if not collected_results:
        logging.error('Failed to obtain any perf results from %s.', benchmark_name)
        return

    # Results from multiple shards are assumed to be histogram sets;
    # non-telemetry benchmarks only ever run on one shard.
    assert (isinstance(collected_results[0], list))
    merged_results = _merge_histogram_results(collected_results)

    # Write additional histogram build info.
    merged_results = _add_build_info(merged_results, benchmark_name, build_properties)

    with open(results_filename, 'w') as rf:
        json.dump(merged_results, rf)

    end_time = time.time()
    print_duration(('%s results merging' % (benchmark_name)), begin_time, end_time)


def _upload_individual(benchmark_name, directories, configuration_name, build_properties,
                       output_json_file):
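    """Merges one benchmark's shard results if needed and uploads them to the perf dashboard.

  Returns:
    A (benchmark_name, upload_succeeded) tuple.
  """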
    tmpfile_dir = tempfile.mkdtemp()
    try:
        upload_begin_time = time.time()
        # There are potentially multiple directories with results, re-write and
        # merge them if necessary
        results_filename = None
        if len(directories) > 1:
            merge_perf_dir = os.path.join(os.path.abspath(tmpfile_dir), benchmark_name)
            if not os.path.exists(merge_perf_dir):
                os.makedirs(merge_perf_dir)
            results_filename = os.path.join(merge_perf_dir, 'merged_perf_results.json')
            _merge_perf_results(benchmark_name, results_filename, directories, build_properties)
        else:
            # It was only written to one shard, use that shard's data
            results_filename = os.path.join(directories[0], 'perf_results.json')

        results_size_in_mib = os.path.getsize(results_filename) / (2**20)
        logging.info('Uploading perf results from %s benchmark (size %s MiB)' %
                     (benchmark_name, results_size_in_mib))
        upload_return_code = _upload_perf_results(results_filename, benchmark_name,
                                                  configuration_name, build_properties,
                                                  output_json_file)
        upload_end_time = time.time()
        print_duration(('%s upload time' % (benchmark_name)), upload_begin_time, upload_end_time)
        return (benchmark_name, upload_return_code == 0)
    finally:
        shutil.rmtree(tmpfile_dir)


def _upload_individual_benchmark(params):
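    """Unpacks params for _upload_individual; never raises, so it is safe for multiprocessing."""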
    try:
        return _upload_individual(*params)
    except Exception:
        benchmark_name = params[0]
        upload_succeed = False
        logging.exception('Error uploading perf result of %s' % benchmark_name)
        return benchmark_name, upload_succeed


def _GetCpuCount(log=True):
    try:
        cpu_count = multiprocessing.cpu_count()
        if sys.platform == 'win32':
            # TODO(crbug.com/1190269) - we can't use more than 56
            # cores on Windows or Python3 may hang.
            cpu_count = min(cpu_count, 56)
        return cpu_count
    except NotImplementedError:
        if log:
            logging.warning('Failed to get a CPU count for this bot. See crbug.com/947035.')
        # TODO(crbug.com/948281): This is currently set to 4 since the mac masters
        # only have 4 cores. Once we move to all-linux, this can be increased or
        # we can even delete this whole function and use multiprocessing.cpu_count()
        # directly.
        return 4


def _load_shard_id_from_test_results(directory):
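    """Reads the shard id recorded in a directory's test_results.json, or None on failure."""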
    shard_id = None
    test_json_path = os.path.join(directory, 'test_results.json')
    try:
        with open(test_json_path) as f:
            test_json = json.load(f)
            all_results = test_json['tests']
            for _, benchmark_results in all_results.items():
                for _, measurement_result in benchmark_results.items():
                    shard_id = measurement_result['shard']
                    break
    except IOError as e:
        logging.error('Failed to open test_results.json from %s: %s', test_json_path, e)
    except KeyError as e:
        logging.error('Failed to locate results in test_results.json: %s', e)
    return shard_id


def _find_device_id_by_shard_id(benchmarks_shard_map_file, shard_id):
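    """Looks up the device id for a shard in the benchmarks shard map, or None if missing."""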
    device_id = None
    try:
        with open(benchmarks_shard_map_file) as f:
            shard_map_json = json.load(f)
            device_id = shard_map_json['extra_infos']['bot #%s' % shard_id]
    except KeyError as e:
        logging.error('Failed to locate device name in shard map: %s', e)
    return device_id


def _update_perf_json_with_summary_on_device_id(directory, device_id):
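    """Annotates a directory's perf_results.json with the device id as a summary key."""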
    perf_json_path = os.path.join(directory, 'perf_results.json')
    try:
        with open(perf_json_path, 'r') as f:
            perf_json = json.load(f)
    except IOError as e:
        logging.error('Failed to open perf_results.json from %s: %s', perf_json_path, e)
        return
    summary_key_guid = str(uuid.uuid4())
    summary_key_generic_set = {
        'values': ['device_id'],
        'guid': summary_key_guid,
        'type': 'GenericSet'
    }
    perf_json.insert(0, summary_key_generic_set)
    logging.info('Inserted summary key generic set for perf result in %s: %s', directory,
                 summary_key_generic_set)
    stories_guids = set()
    for entry in perf_json:
        if 'diagnostics' in entry:
            entry['diagnostics']['summaryKeys'] = summary_key_guid
            stories_guids.add(entry['diagnostics']['stories'])
    for entry in perf_json:
        if 'guid' in entry and entry['guid'] in stories_guids:
            entry['values'].append(device_id)
    try:
        with open(perf_json_path, 'w') as f:
            json.dump(perf_json, f)
    except IOError as e:
        logging.error('Failed to write perf_results.json to %s: %s', perf_json_path, e)
    logging.info('Finished adding device id %s in perf result.', device_id)


def _handle_perf_results(benchmark_enabled_map, benchmark_directory_map, configuration_name,
                         build_properties, extra_links, output_results_dir):
    """
    Upload perf results to the perf dashboard.

    This method also uploads the perf results to logdog and adds the links to
    |extra_links|.

    Returns:
      (return_code, benchmark_upload_result_map)
      return_code is 0 if the upload to the perf dashboard succeeded, 1
        otherwise.
      benchmark_upload_result_map is a dictionary that describes which
        benchmarks were successfully uploaded.
    """
    begin_time = time.time()
    # Upload all eligible benchmarks to the perf dashboard
    results_dict = {}

    invocations = []
    for benchmark_name, directories in benchmark_directory_map.items():
        if not benchmark_enabled_map.get(benchmark_name, False):
            continue
        # Create a place to write the perf results that will be written out to
        # logdog.
        output_json_file = os.path.join(output_results_dir, (str(uuid.uuid4()) + benchmark_name))
        results_dict[benchmark_name] = output_json_file
        # TODO(crbug.com/1072729): pass final arguments instead of build properties
        # and configuration_name
        invocations.append(
            (benchmark_name, directories, configuration_name, build_properties, output_json_file))

    # Kick off the uploads in multiple processes
    # crbug.com/1035930: We are hitting HTTP Response 429. Limit ourselves
    # to 2 processes to avoid this error. Uncomment the following code once
    # the problem is fixed on the dashboard side.
    # pool = multiprocessing.Pool(_GetCpuCount())
    pool = multiprocessing.Pool(2)
    upload_result_timeout = False
    try:
        async_result = pool.map_async(_upload_individual_benchmark, invocations)
        # TODO(crbug.com/947035): What timeout is reasonable?
        results = async_result.get(timeout=4000)
    except multiprocessing.TimeoutError:
        upload_result_timeout = True
        logging.error('Timeout uploading benchmarks to perf dashboard in parallel')
        results = []
        for benchmark_name in benchmark_directory_map:
            results.append((benchmark_name, False))
    finally:
        pool.terminate()

    # Keep a mapping of benchmarks to their upload results
    benchmark_upload_result_map = {}
    for r in results:
        benchmark_upload_result_map[r[0]] = r[1]

    logdog_dict = {}
    upload_failures_counter = 0
    logdog_stream = None
    logdog_label = 'Results Dashboard'
    for benchmark_name, output_file in results_dict.items():
        upload_succeed = benchmark_upload_result_map[benchmark_name]
        if not upload_succeed:
            upload_failures_counter += 1
        is_reference = '.reference' in benchmark_name
        _write_perf_data_to_logfile(
            benchmark_name,
            output_file,
            configuration_name,
            build_properties,
            logdog_dict,
            is_reference,
            upload_failure=not upload_succeed)

    logdog_file_name = _generate_unique_logdog_filename('Results_Dashboard_')
    logdog_stream = logdog_helper.text(
        logdog_file_name,
        json.dumps(dict(logdog_dict), sort_keys=True, indent=4, separators=(',', ': ')),
        content_type=JSON_CONTENT_TYPE)
    if upload_failures_counter > 0:
        logdog_label += (' %s merge script perf data upload failures' % upload_failures_counter)
    extra_links[logdog_label] = logdog_stream
    end_time = time.time()
    print_duration('Uploading results to perf dashboard', begin_time, end_time)
    if upload_result_timeout or upload_failures_counter > 0:
        return 1, benchmark_upload_result_map
    return 0, benchmark_upload_result_map


def _write_perf_data_to_logfile(benchmark_name, output_file, configuration_name, build_properties,
                                logdog_dict, is_ref, upload_failure):
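    """Uploads a benchmark's perf results to logdog and records its viewer URL in logdog_dict."""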
    viewer_url = None
    # logdog file to write perf results to
    if os.path.exists(output_file):
        results = None
        with open(output_file) as f:
            try:
                results = json.load(f)
            except ValueError:
                logging.error('Error parsing perf results JSON for benchmark %s' % benchmark_name)
        if results:
            try:
                json_fname = _generate_unique_logdog_filename(benchmark_name)
                output_json_file = logdog_helper.open_text(json_fname)
                json.dump(results, output_json_file, indent=4, separators=(',', ': '))
            except ValueError as e:
                logging.error('ValueError: "%s" while dumping output to logdog' % e)
            finally:
                output_json_file.close()
            viewer_url = output_json_file.get_viewer_url()
    else:
        logging.warning("Perf results JSON file doesn't exist for benchmark %s" % benchmark_name)

    base_benchmark_name = benchmark_name.replace('.reference', '')

    if base_benchmark_name not in logdog_dict:
        logdog_dict[base_benchmark_name] = {}

    # add links for the perf results and the dashboard url to
    # the logs section of buildbot
    if is_ref:
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results_ref'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['ref_upload_failed'] = 'True'
    else:
        # TODO(jmadill): Figure out if we can get a dashboard URL here. http://anglebug.com/40096778
        # logdog_dict[base_benchmark_name]['dashboard_url'] = (
        #     upload_results_to_perf_dashboard.GetDashboardUrl(benchmark_name, configuration_name,
        #                                                      RESULTS_URL,
        #                                                      build_properties['got_revision_cp'],
        #                                                      _GetMachineGroup(build_properties)))
        if viewer_url:
            logdog_dict[base_benchmark_name]['perf_results'] = viewer_url
        if upload_failure:
            logdog_dict[base_benchmark_name]['upload_failed'] = 'True'


def print_duration(step, start, end):
    logging.info('Duration of %s: %d seconds' % (step, end - start))


def main():
    """ See collect_task.collect_task for more on the merge script API. """
    logging.info(sys.argv)
    parser = argparse.ArgumentParser()
    # configuration-name (previously perf-id) is the name of the bot the tests run on.
    # For example, buildbot-test is the name of the android-go-perf bot.
    # configuration-name and results-url are set in the json file
    # tools/perf/core/chromium.perf.fyi.extras.json, which is going away.
    parser.add_argument('--configuration-name', help=argparse.SUPPRESS)
    parser.add_argument('--build-properties', help=argparse.SUPPRESS)
    parser.add_argument('--summary-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument('--task-output-dir', required=True, help=argparse.SUPPRESS)
    parser.add_argument('-o', '--output-json', required=True, help=argparse.SUPPRESS)
    parser.add_argument(
        '--skip-perf',
        action='store_true',
        help='In lightweight mode, using --skip-perf will skip the performance'
        ' data handling.')
    parser.add_argument(
        '--lightweight',
        action='store_true',
        help='Choose the lightweight mode in which the perf result handling'
        ' is performed on a separate VM.')
    parser.add_argument('json_files', nargs='*', help=argparse.SUPPRESS)
    parser.add_argument(
        '--smoke-test-mode',
        action='store_true',
        help='This test should be run in smoke test mode'
        ' meaning it does not upload to the perf dashboard')

    args = parser.parse_args()

    with open(args.summary_json) as f:
        shard_summary = json.load(f)
    shard_failed = any(int(shard.get('exit_code', 1)) != 0 for shard in shard_summary['shards'])

    output_results_dir = tempfile.mkdtemp('outputresults')
    try:
        return_code, _ = process_perf_results(args.output_json, args.configuration_name,
                                              args.build_properties, args.task_output_dir,
                                              args.smoke_test_mode, output_results_dir,
                                              args.lightweight, args.skip_perf)
    except Exception:
        logging.exception('process_perf_results raised an exception')
        return_code = 1
    finally:
        shutil.rmtree(output_results_dir)

    if return_code != 0 and shard_failed:
        logging.warning('Perf processing failed but one or more shards failed earlier')
        return_code = 0  # Enables the failed build info to be rendered normally

    return return_code


if __name__ == '__main__':
    sys.exit(main())