#!/usr/bin/env python3
#
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Computes the diff between two qps runs and outputs significant results """

import argparse
import json
import multiprocessing
import os
import shutil
import subprocess
import sys

import qps_scenarios
import tabulate

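# bm_speedup lives in ../microbenchmarks/bm_diff; make it importable.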
sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), "..", "microbenchmarks", "bm_diff"
    )
)
import bm_speedup

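# check_on_pr lives in ../../run_tests/python_utils and is used to report results on the PR.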
sys.path.append(
    os.path.join(
        os.path.dirname(sys.argv[0]), "..", "..", "run_tests", "python_utils"
    )
)
import check_on_pr


def _args():
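    """Parses command-line arguments; --diff_base must be provided."""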
    argp = argparse.ArgumentParser(description="Perform diff on QPS Driver")
    argp.add_argument(
        "-d",
        "--diff_base",
        type=str,
        help="Commit or branch to compare the current one to",
    )
    argp.add_argument(
        "-l",
        "--loops",
        type=int,
        default=4,
        help=(
            "Number of loops for each benchmark. More loops cut down on noise"
        ),
    )
    argp.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=multiprocessing.cpu_count(),
        help="Number of CPUs to use",
    )
    args = argp.parse_args()
    assert args.diff_base, "diff_base must be set"
    return args


def _make_cmd(jobs):
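    """Returns the make command for building qps_json_driver and qps_worker."""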
    return ["make", "-j", "%d" % jobs, "qps_json_driver", "qps_worker"]


def build(name, jobs):
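    """Builds the QPS binaries and stashes them under qps_diff_<name>."""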
    shutil.rmtree("qps_diff_%s" % name, ignore_errors=True)
    subprocess.check_call(["git", "submodule", "update"])
    try:
        subprocess.check_call(_make_cmd(jobs))
    except subprocess.CalledProcessError:
        # The incremental build failed; clean and rebuild from scratch.
        subprocess.check_call(["make", "clean"])
        subprocess.check_call(_make_cmd(jobs))
    os.rename("bins", "qps_diff_%s" % name)


def _run_cmd(name, scenario, fname):
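    """Returns the qps_json_driver command for one scenario, writing results to fname."""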
    return [
        "qps_diff_%s/opt/qps_json_driver" % name,
        "--scenarios_json",
        scenario,
        "--json_file_out",
        fname,
    ]


def run(name, scenarios, loops):
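    """Runs each scenario 'loops' times, writing one JSON result file per run."""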
    for sn in scenarios:
        for i in range(loops):
            fname = "%s.%s.%d.json" % (sn, name, i)
            subprocess.check_call(_run_cmd(name, scenarios[sn], fname))


def _load_qps(fname):
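    """Reads the qps measurement from a result file, or returns None if it cannot be read."""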
    try:
        with open(fname) as f:
            return json.loads(f.read())["qps"]
    except IOError:
        print("IOError occurred reading file: %s" % fname)
        return None
    except ValueError:
        print("ValueError occurred reading file: %s" % fname)
        return None


def _median(ary):
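    """Returns the median of a non-empty list of numbers."""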
    assert len(ary)
    ary = sorted(ary)
    n = len(ary)
    if n % 2 == 0:
        return (ary[(n - 1) // 2] + ary[(n - 1) // 2 + 1]) / 2.0
    else:
        return ary[n // 2]


def diff(scenarios, loops, old, new):
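    """Compares median qps between the old and new runs and tabulates significant differences."""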
    old_data = {}
    new_data = {}

    # collect data
    for sn in scenarios:
        old_data[sn] = []
        new_data[sn] = []
        for i in range(loops):
            old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
            new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))

    # crunch data
    headers = ["Benchmark", "qps"]
    rows = []
    for sn in scenarios:
        mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
        print(
            "%s: %s=%r %s=%r mdn_diff=%r"
            % (sn, new, new_data[sn], old, old_data[sn], mdn_diff)
        )
        s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
        if abs(s) > 3 and mdn_diff > 0.5:
            rows.append([sn, "%+d%%" % s])

    if rows:
        return tabulate.tabulate(rows, headers=headers, floatfmt="+.2f")
    else:
        return None


def main(args):
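    """Builds old and new binaries, runs all scenarios with each, and reports the qps diff."""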
    build("new", args.jobs)

    if args.diff_base:
        where_am_i = (
            subprocess.check_output(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"]
            )
            .decode()
            .strip()
        )
        subprocess.check_call(["git", "checkout", args.diff_base])
        try:
            build("old", args.jobs)
        finally:
            subprocess.check_call(["git", "checkout", where_am_i])
            subprocess.check_call(["git", "submodule", "update"])

    run("new", qps_scenarios._SCENARIOS, args.loops)
    run("old", qps_scenarios._SCENARIOS, args.loops)

    diff_output = diff(qps_scenarios._SCENARIOS, args.loops, "old", "new")

    if diff_output:
        text = "[qps] Performance differences noted:\n%s" % diff_output
    else:
        text = "[qps] No significant performance differences"
    print(text)
    check_on_pr.check_on_pr("QPS", "```\n%s\n```" % text)


if __name__ == "__main__":
    args = _args()
    main(args)