// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "biostacks.h"
#include "bits.bpf.h"
#include "maps.bpf.h"
#include "core_fixes.bpf.h"

#define MAX_ENTRIES	10240

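/*
 * Tunables. These const volatile globals live in .rodata and are set by
 * the userspace loader before the object is loaded into the kernel.
 */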
const volatile bool targ_ms = false;
const volatile bool filter_dev = false;
const volatile __u32 targ_dev = -1;

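/*
 * Per-request bookkeeping kept while an I/O is in flight: the issue
 * timestamp plus the rqinfo key (pid, comm, device, kernel stack) that
 * completed latencies are aggregated under.
 */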
struct internal_rqinfo {
	u64 start_ts;
	struct rqinfo rqinfo;
};

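/* In-flight requests, keyed by struct request pointer; entries are
 * deleted when the request completes. */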
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct request *);
	__type(value, struct internal_rqinfo);
} rqinfos SEC(".maps");

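/* One latency histogram per unique rqinfo (initiating stack, task, and
 * device), for userspace to read and print. */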
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct rqinfo);
	__type(value, struct hist);
} hists SEC(".maps");

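/* Zero-filled initial value for bpf_map_lookup_or_try_init(). */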
static struct hist zero;

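/*
 * Record the issue time, the initiating task, and the kernel stack that
 * submitted the I/O.  With merge_bio set (a bio merged into an already
 * queued request), any existing entry for the request is updated in
 * place; otherwise a fresh entry is inserted.
 */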
static __always_inline
int trace_start(void *ctx, struct request *rq, bool merge_bio)
{
	struct internal_rqinfo *i_rqinfop = NULL, i_rqinfo = {};
	struct gendisk *disk = get_disk(rq);
	u32 dev;

	dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
			BPF_CORE_READ(disk, first_minor)) : 0;
	if (filter_dev && targ_dev != dev)
		return 0;

	if (merge_bio)
		i_rqinfop = bpf_map_lookup_elem(&rqinfos, &rq);
	if (!i_rqinfop)
		i_rqinfop = &i_rqinfo;

	i_rqinfop->start_ts = bpf_ktime_get_ns();
	i_rqinfop->rqinfo.pid = bpf_get_current_pid_tgid();
	i_rqinfop->rqinfo.kern_stack_size =
		bpf_get_stack(ctx, i_rqinfop->rqinfo.kern_stack,
			sizeof(i_rqinfop->rqinfo.kern_stack), 0);
	bpf_get_current_comm(&i_rqinfop->rqinfo.comm,
			sizeof(i_rqinfop->rqinfo.comm));
	i_rqinfop->rqinfo.dev = dev;

	if (i_rqinfop == &i_rqinfo)
		bpf_map_update_elem(&rqinfos, &rq, i_rqinfop, 0);
	return 0;
}

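/*
 * On completion, compute the time elapsed since trace_start, bucket it
 * into the log2 histogram for this request's rqinfo (milliseconds or
 * microseconds, depending on targ_ms), and drop the in-flight entry.
 */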
static __always_inline
int trace_done(void *ctx, struct request *rq)
{
	u64 slot, ts = bpf_ktime_get_ns();
	struct internal_rqinfo *i_rqinfop;
	struct hist *histp;
	s64 delta;
	u64 udelta;

	i_rqinfop = bpf_map_lookup_elem(&rqinfos, &rq);
	if (!i_rqinfop)
		return 0;
	delta = (s64)(ts - i_rqinfop->start_ts);
	if (delta < 0)
		goto cleanup;
	udelta = (u64)delta;
	histp = bpf_map_lookup_or_try_init(&hists, &i_rqinfop->rqinfo, &zero);
	if (!histp)
		goto cleanup;
	if (targ_ms)
		udelta /= 1000000U;
	else
		udelta /= 1000U;
	slot = log2l(udelta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&rqinfos, &rq);
	return 0;
}

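/*
 * Attach points come in three flavors: a kprobe for bio merges, fentry
 * programs for kernels that expose blk_account_io_start/done, and tp_btf
 * programs for kernels that replaced those functions with the
 * block_io_start/done tracepoints.  The userspace loader is expected to
 * attach only the variants available on the running kernel.
 */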
SEC("kprobe/blk_account_io_merge_bio")
int BPF_KPROBE(blk_account_io_merge_bio, struct request *rq)
{
	return trace_start(ctx, rq, true);
}

SEC("fentry/blk_account_io_start")
int BPF_PROG(blk_account_io_start, struct request *rq)
{
	return trace_start(ctx, rq, false);
}

SEC("fentry/blk_account_io_done")
int BPF_PROG(blk_account_io_done, struct request *rq)
{
	return trace_done(ctx, rq);
}

SEC("tp_btf/block_io_start")
int BPF_PROG(block_io_start, struct request *rq)
{
	return trace_start(ctx, rq, false);
}

SEC("tp_btf/block_io_done")
int BPF_PROG(block_io_done, struct request *rq)
{
	return trace_done(ctx, rq);
}

char LICENSE[] SEC("license") = "GPL";