#!/usr/bin/env python3
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import html
import multiprocessing
import os
import subprocess
import sys

import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server

# Make the bm_diff helpers (bm_constants) importable regardless of the
# directory this script is invoked from.
sys.path.append(
    os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
                 'microbenchmarks', 'bm_diff'))
import bm_constants
# Path to a local FlameGraph checkout
# (https://github.com/brendangregg/FlameGraph).
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')

# Run from the repository root so the relative paths below resolve.
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
    os.makedirs('reports')

start_port_server.start_port_server()


def fnize(s):
    """Return a filesystem-friendly version of a benchmark name.

    The separator characters '<', '>', ',', ' ' and '/' are replaced by
    underscores, and consecutive separators collapse into a single '_'.
    """
    out = ''
    for c in s:
        if c in '<>, /':
            if len(out) and out[-1] == '_':
                continue
            out += '_'
        else:
            out += c
    return out
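
# Illustrative only: given the rules above, a templated benchmark name such
# as 'BM_Foo<A, B>/0' (a made-up name) would come out as 'BM_Foo_A_B_0':
# assert fnize('BM_Foo<A, B>/0') == 'BM_Foo_A_B_0'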


# Accumulated HTML for the report; collectors append sections to this and it
# is written out to reports/index.html when the run finishes.
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""


def heading(name):
    """Add a section heading to the HTML report."""
    global index_html
    index_html += "<h1>%s</h1>\n" % name


def link(txt, tgt):
    """Add a hyperlink to the HTML report, escaping both text and target."""
    global index_html
    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (html.escape(
        tgt, quote=True), html.escape(txt))


def text(txt):
    """Add preformatted text (e.g. benchmark output) to the HTML report."""
    global index_html
    # <pre> is not valid inside <p>, so emit it on its own.
    index_html += "<pre>%s</pre>\n" % html.escape(txt)


def _bazel_build_benchmark(bm_name, cfg):
    """Build the given benchmark target with bazel."""
    subprocess.check_call([
        'tools/bazel', 'build',
        '--config=%s' % cfg,
        '//test/cpp/microbenchmarks:%s' % bm_name
    ])
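# For illustration, with cfg='opt' and bm_name='bm_closure' (a hypothetical
# target name), the call above is equivalent to running:
#   tools/bazel build --config=opt //test/cpp/microbenchmarks:bm_closure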


def run_summary(bm_name, cfg, base_json_name):
    """Build and run a single benchmark binary, returning its stdout.

    Results are also written as JSON to <base_json_name>.<cfg>.json.
    Note: reads the module-level `args` parsed below.
    """
    _bazel_build_benchmark(bm_name, cfg)
    cmd = [
        'bazel-bin/test/cpp/microbenchmarks/%s' % bm_name,
        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
        '--benchmark_out_format=json'
    ]
    if args.summary_time is not None:
        cmd += ['--benchmark_min_time=%d' % args.summary_time]
    return subprocess.check_output(cmd).decode('UTF-8')
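# Illustrative only: for bm_name='bm_closure' (hypothetical) and cfg='opt',
# the command list above expands to roughly:
#   bazel-bin/test/cpp/microbenchmarks/bm_closure \
#       --benchmark_out=bm_closure.opt.json --benchmark_out_format=json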


def collect_summary(bm_name, args):
    # No counters: run the microbenchmark and add its summary both to the
    # HTML report and to the console.
    nocounters_heading = 'Summary: %s' % bm_name
    nocounters_summary = run_summary(bm_name, 'opt', bm_name)
    heading(nocounters_heading)
    text(nocounters_summary)
    print(nocounters_heading)
    print(nocounters_summary)


collectors = {
    'summary': collect_summary,
}
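
# Every collector is a callable taking (bm_name, args). A hypothetical
# additional collector (not implemented here) would be registered the same
# way, e.g. collectors['perf'] = collect_perf.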

argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
                  '--collect',
                  choices=sorted(collectors.keys()),
                  nargs='*',
                  default=sorted(collectors.keys()),
                  help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
                  '--benchmarks',
                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
                  nargs='+',
                  type=str,
                  help='Which microbenchmarks should be run')
argp.add_argument(
    '--bq_result_table',
    default='',
    type=str,
    help='Upload results from the summary collection to the given BigQuery table'
)
argp.add_argument(
    '--summary_time',
    default=None,
    type=int,
    help='Minimum time (in seconds) to run each benchmark for the summary collection')
args = argp.parse_args()

try:
    for collect in args.collect:
        for bm_name in args.benchmarks:
            collectors[collect](bm_name, args)
finally:
    # Always write out whatever was collected, even if a collector failed.
    if not os.path.exists('reports'):
        os.makedirs('reports')
    index_html += "</body>\n</html>\n"
    with open('reports/index.html', 'w') as f:
        f.write(index_html)
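
# Example invocation (illustrative; the script name and benchmark target are
# assumptions about how this file is deployed):
#   python3 run_microbenchmark.py -c summary -b bm_closure --summary_time 2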