// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

#include "biolatency.h"
#include "bits.bpf.h"
#include "core_fixes.bpf.h"

#define MAX_ENTRIES	10240

extern int LINUX_KERNEL_VERSION __kconfig;

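/*
 * Tunables: constants from the BPF program's point of view. The
 * userspace loader fills them in via the skeleton's read-only data
 * before the program is loaded.
 */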
const volatile bool filter_cg = false;
const volatile bool targ_per_disk = false;
const volatile bool targ_per_flag = false;
const volatile bool targ_queued = false;
const volatile bool targ_ms = false;
const volatile bool filter_dev = false;
const volatile __u32 targ_dev = 0;

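/*
 * Single-slot cgroup array: userspace stores the target cgroup at
 * index 0, and bpf_current_task_under_cgroup() tests against it
 * whenever filter_cg is set.
 */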
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");

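/*
 * In-flight requests: maps a struct request pointer to the timestamp
 * (ns) taken when the request was inserted or issued.
 */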
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct request *);
	__type(value, u64);
} start SEC(".maps");

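/* Zeroed template used to seed a new histogram entry on first use. */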
static struct hist initial_hist;

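/*
 * Latency histograms. dev and cmd_flags in the key are only filled in
 * when per-disk or per-flag aggregation is requested; otherwise every
 * sample lands under a single all-zero key.
 */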
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct hist_key);
	__type(value, struct hist);
} hists SEC(".maps");

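/*
 * Record the start timestamp for a request. When targ_queued is set,
 * time is measured from insert, so issue events are skipped for
 * requests whose queue has an elevator (those were already stamped at
 * insert); requests on queues without a scheduler bypass insert and
 * are stamped here instead.
 */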
static int __always_inline trace_rq_start(struct request *rq, int issue)
{
	u64 ts;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	if (issue && targ_queued && BPF_CORE_READ(rq, q, elevator))
		return 0;

	ts = bpf_ktime_get_ns();

	if (filter_dev) {
		struct gendisk *disk = get_disk(rq);
		u32 dev;

		dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
				BPF_CORE_READ(disk, first_minor)) : 0;
		if (targ_dev != dev)
			return 0;
	}
	bpf_map_update_elem(&start, &rq, &ts, 0);
	return 0;
}

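/*
 * Raw tracepoint handlers receive the tracepoint arguments as an
 * untyped u64 array, so the position of the request pointer depends on
 * the kernel version (see the comment below).
 */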
static int handle_block_rq_insert(__u64 *ctx)
{
	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0))
		return trace_rq_start((void *)ctx[1], false);
	else
		return trace_rq_start((void *)ctx[0], false);
}

static int handle_block_rq_issue(__u64 *ctx)
{
	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (LINUX_KERNEL_VERSION < KERNEL_VERSION(5, 11, 0))
		return trace_rq_start((void *)ctx[1], true);
	else
		return trace_rq_start((void *)ctx[0], true);
}

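/*
 * On completion, compute the elapsed time since the recorded start,
 * scale it to microseconds (or milliseconds when targ_ms is set), and
 * bump the matching log2 histogram slot.
 */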
static int handle_block_rq_complete(struct request *rq, int error, unsigned int nr_bytes)
{
	u64 slot, *tsp, ts = bpf_ktime_get_ns();
	struct hist_key hkey = {};
	struct hist *histp;
	s64 delta;
	u64 udelta;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	tsp = bpf_map_lookup_elem(&start, &rq);
	if (!tsp)
		return 0;

	delta = (s64)(ts - *tsp);
	if (delta < 0)
		goto cleanup;

	udelta = (u64)delta;

	if (targ_per_disk) {
		struct gendisk *disk = get_disk(rq);

		hkey.dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
					BPF_CORE_READ(disk, first_minor)) : 0;
	}
	if (targ_per_flag)
		hkey.cmd_flags = BPF_CORE_READ(rq, cmd_flags);

	histp = bpf_map_lookup_elem(&hists, &hkey);
	if (!histp) {
		/* First sample for this key: seed the entry, then re-lookup. */
		bpf_map_update_elem(&hists, &hkey, &initial_hist, 0);
		histp = bpf_map_lookup_elem(&hists, &hkey);
		if (!histp)
			goto cleanup;
	}

	if (targ_ms)
		udelta /= 1000000U;
	else
		udelta /= 1000U;
	slot = log2l(udelta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&start, &rq);
	return 0;
}

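/*
 * Each tracepoint is wired up twice: tp_btf programs for kernels with
 * BTF-enabled raw tracepoints, and plain raw_tp fallbacks below. The
 * loader is expected to enable only one of the two sets.
 */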
SEC("tp_btf/block_rq_insert")
int block_rq_insert_btf(u64 *ctx)
{
	return handle_block_rq_insert(ctx);
}

SEC("tp_btf/block_rq_issue")
int block_rq_issue_btf(u64 *ctx)
{
	return handle_block_rq_issue(ctx);
}

SEC("tp_btf/block_rq_complete")
int BPF_PROG(block_rq_complete_btf, struct request *rq, int error, unsigned int nr_bytes)
{
	return handle_block_rq_complete(rq, error, nr_bytes);
}

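/* raw_tp fallbacks for kernels without tp_btf support. */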
SEC("raw_tp/block_rq_insert")
int BPF_PROG(block_rq_insert)
{
	return handle_block_rq_insert(ctx);
}

SEC("raw_tp/block_rq_issue")
int BPF_PROG(block_rq_issue)
{
	return handle_block_rq_issue(ctx);
}

SEC("raw_tp/block_rq_complete")
int BPF_PROG(block_rq_complete, struct request *rq, int error, unsigned int nr_bytes)
{
	return handle_block_rq_complete(rq, error, nr_bytes);
}

char LICENSE[] SEC("license") = "GPL";