#!/usr/bin/env python
# @lint-avoid-python-3-compatibility-imports
#
# zfsslower  Trace slow ZFS operations.
#            For Linux, uses BCC, eBPF.
#
# USAGE: zfsslower [-h] [-j] [-p PID] [min_ms]
#
# This script traces common ZFS file operations: reads, writes, opens, and
# syncs. It measures the time spent in these operations, and prints details
# for each that exceeded a threshold.
#
# WARNING: This adds low-overhead instrumentation to these ZFS operations,
# including reads and writes from the file system cache. Such reads and writes
# can be very frequent (depending on the workload; e.g., 1M/sec), at which
# point the overhead of this tool (even if it prints no "slower" events) can
# become significant.
#
# This works by using kernel dynamic tracing of the ZPL interface, and will
# need updates to match any changes to this interface.
#
# By default, a minimum millisecond threshold of 10 is used.
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# 14-Feb-2016   Brendan Gregg   Created this.
# 16-Oct-2016   Dina Goldshtein -p to filter by process ID.

from __future__ import print_function
from bcc import BPF
import argparse
from time import strftime

# arguments
examples = """examples:
    ./zfsslower             # trace operations slower than 10 ms (default)
    ./zfsslower 1           # trace operations slower than 1 ms
    ./zfsslower -j 1        # ... 1 ms, parsable output (csv)
    ./zfsslower 0           # trace all operations (warning: verbose)
    ./zfsslower -p 185      # trace PID 185 only
"""
parser = argparse.ArgumentParser(
    description="Trace common ZFS file operations slower than a threshold",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=examples)
parser.add_argument("-j", "--csv", action="store_true",
    help="just print fields: comma-separated values")
parser.add_argument("-p", "--pid",
    help="trace this PID only")
parser.add_argument("min_ms", nargs="?", default='10',
    help="minimum I/O duration to trace, in ms (default 10)")
parser.add_argument("--ebpf", action="store_true",
    help=argparse.SUPPRESS)
args = parser.parse_args()
min_ms = int(args.min_ms)
pid = args.pid
csv = args.csv
debug = 0

# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/dcache.h>

// XXX: switch these to char's when supported
#define TRACE_READ      0
#define TRACE_WRITE     1
#define TRACE_OPEN      2
#define TRACE_FSYNC     3

struct val_t {
    u64 ts;
    u64 offset;
    struct file *fp;
};

struct data_t {
    // XXX: switch some to u32's when supported
    u64 ts_us;
    u64 type;
    u32 size;
    u64 offset;
    u64 delta_us;
    u32 pid;
    char task[TASK_COMM_LEN];
    char file[DNAME_INLINE_LEN];
};

BPF_HASH(entryinfo, u64, struct val_t);
BPF_PERF_OUTPUT(events);
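// entryinfo stores per-thread entry state keyed by pid_tgid; events streams
// completed operations to user space for printing.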

//
// Store timestamp and size on entry
//

// zpl_read(), zpl_write():
int trace_rw_entry(struct pt_regs *ctx, struct file *filp, char __user *buf,
    size_t len, loff_t *ppos)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

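    // FILTER_PID is rewritten by the Python loader: "pid != <PID>" when -p is
    // given, or 0 (no filter) otherwise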
    if (FILTER_PID)
        return 0;

    // store filp and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = *ppos;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

// zpl_open():
int trace_open_entry(struct pt_regs *ctx, struct inode *inode,
    struct file *filp)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    if (FILTER_PID)
        return 0;

    // store filp and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

// zpl_fsync():
int trace_fsync_entry(struct pt_regs *ctx, struct file *filp)
{
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    if (FILTER_PID)
        return 0;

    // store filp and timestamp by id
    struct val_t val = {};
    val.ts = bpf_ktime_get_ns();
    val.fp = filp;
    val.offset = 0;
    if (val.fp)
        entryinfo.update(&id, &val);

    return 0;
}

//
// Output
//

static int trace_return(struct pt_regs *ctx, int type)
{
    struct val_t *valp;
    u64 id = bpf_get_current_pid_tgid();
    u32 pid = id >> 32; // PID is higher part

    valp = entryinfo.lookup(&id);
    if (valp == 0) {
        // missed tracing issue or filtered
        return 0;
    }

    // calculate delta
    u64 ts = bpf_ktime_get_ns();
    u64 delta_us = (ts - valp->ts) / 1000;
    entryinfo.delete(&id);
    if (FILTER_US)
        return 0;

    // populate output struct
    struct data_t data = {};
    data.type = type;
    data.size = PT_REGS_RC(ctx);
    data.delta_us = delta_us;
    data.pid = pid;
    data.ts_us = ts / 1000;
    data.offset = valp->offset;
    bpf_get_current_comm(&data.task, sizeof(data.task));

    struct qstr qs = valp->fp->f_path.dentry->d_name;
    if (qs.len == 0)
        return 0;
    bpf_probe_read_kernel(&data.file, sizeof(data.file), (void *)qs.name);

    // output
    events.perf_submit(ctx, &data, sizeof(data));

    return 0;
}

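// Thin wrappers so each kretprobe reports the right operation type: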
int trace_read_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_READ);
}

int trace_write_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_WRITE);
}

int trace_open_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_OPEN);
}

int trace_fsync_return(struct pt_regs *ctx)
{
    return trace_return(ctx, TRACE_FSYNC);
}

"""
if min_ms == 0:
    bpf_text = bpf_text.replace('FILTER_US', '0')
else:
    bpf_text = bpf_text.replace('FILTER_US',
        'delta_us <= %s' % str(min_ms * 1000))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
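# e.g. with the default 10 ms threshold and no -p, the placeholders become
# 'delta_us <= 10000' and '0', so only slower operations are emitted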
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()

# process event
def print_event(cpu, data, size):
    event = b["events"].event(data)

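    # map event.type (TRACE_READ/WRITE/OPEN/FSYNC) to a one-letter code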
    type = 'R'
    if event.type == 1:
        type = 'W'
    elif event.type == 2:
        type = 'O'
    elif event.type == 3:
        type = 'S'

    if (csv):
        print("%d,%s,%d,%s,%d,%d,%d,%s" % (
            event.ts_us, event.task.decode('utf-8', 'replace'), event.pid,
            type, event.size, event.offset, event.delta_us,
            event.file.decode('utf-8', 'replace')))
        return
    print("%-8s %-14.14s %-6s %1s %-7s %-8d %7.2f %s" % (strftime("%H:%M:%S"),
        event.task.decode('utf-8', 'replace'), event.pid, type, event.size,
        event.offset / 1024, float(event.delta_us) / 1000,
        event.file.decode('utf-8', 'replace')))

# initialize BPF
b = BPF(text=bpf_text)

# common file functions
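# Different ZFS on Linux versions export different read/write entry points
# (zpl_iter_*, zpl_aio_*, or zpl_read/zpl_write), so attach to whichever
# variant the loaded module provides.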
if BPF.get_kprobe_functions(b'zpl_iter'):
    b.attach_kprobe(event="zpl_iter_read", fn_name="trace_rw_entry")
    b.attach_kprobe(event="zpl_iter_write", fn_name="trace_rw_entry")
elif BPF.get_kprobe_functions(b'zpl_aio'):
    b.attach_kprobe(event="zpl_aio_read", fn_name="trace_rw_entry")
    b.attach_kprobe(event="zpl_aio_write", fn_name="trace_rw_entry")
else:
    b.attach_kprobe(event="zpl_read", fn_name="trace_rw_entry")
    b.attach_kprobe(event="zpl_write", fn_name="trace_rw_entry")
b.attach_kprobe(event="zpl_open", fn_name="trace_open_entry")
b.attach_kprobe(event="zpl_fsync", fn_name="trace_fsync_entry")
if BPF.get_kprobe_functions(b'zpl_iter'):
    b.attach_kretprobe(event="zpl_iter_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_iter_write", fn_name="trace_write_return")
elif BPF.get_kprobe_functions(b'zpl_aio'):
    b.attach_kretprobe(event="zpl_aio_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_aio_write", fn_name="trace_write_return")
else:
    b.attach_kretprobe(event="zpl_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_write", fn_name="trace_write_return")
b.attach_kretprobe(event="zpl_open", fn_name="trace_open_return")
b.attach_kretprobe(event="zpl_fsync", fn_name="trace_fsync_return")

# header
if (csv):
    print("ENDTIME_us,TASK,PID,TYPE,BYTES,OFFSET_b,LATENCY_us,FILE")
else:
    if min_ms == 0:
        print("Tracing ZFS operations")
    else:
        print("Tracing ZFS operations slower than %d ms" % min_ms)
    print("%-8s %-14s %-6s %1s %-7s %-8s %7s %s" % ("TIME", "COMM", "PID", "T",
        "BYTES", "OFF_KB", "LAT(ms)", "FILENAME"))

# read events
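# page_cnt=64 enlarges the per-CPU perf buffer to reduce lost events under load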
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
    try:
        b.perf_buffer_poll()
    except KeyboardInterrupt:
        exit()