// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Francis Laniel <[email protected]>
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_core_read.h>
6 #include <bpf/bpf_tracing.h>
7
8 #include "biotop.h"
9 #include "maps.bpf.h"
10 #include "core_fixes.bpf.h"
11
12 struct {
13 __uint(type, BPF_MAP_TYPE_HASH);
14 __uint(max_entries, 10240);
15 __type(key, struct request *);
16 __type(value, struct start_req_t);
17 } start SEC(".maps");
18
19 struct {
20 __uint(type, BPF_MAP_TYPE_HASH);
21 __uint(max_entries, 10240);
22 __type(key, struct request *);
23 __type(value, struct who_t);
24 } whobyreq SEC(".maps");
25
26 struct {
27 __uint(type, BPF_MAP_TYPE_HASH);
28 __uint(max_entries, 10240);
29 __type(key, struct info_t);
30 __type(value, struct val_t);
31 } counts SEC(".maps");
32
33 static __always_inline
trace_start(struct request * req)34 int trace_start(struct request *req)
35 {
36 struct who_t who = {};
37
38 /* cache PID and comm by-req */
39 bpf_get_current_comm(&who.name, sizeof(who.name));
40 who.pid = bpf_get_current_pid_tgid() >> 32;
41 bpf_map_update_elem(&whobyreq, &req, &who, 0);
42
43 return 0;
44 }
45
46 SEC("kprobe/blk_mq_start_request")
BPF_KPROBE(blk_mq_start_request,struct request * req)47 int BPF_KPROBE(blk_mq_start_request, struct request *req)
48 {
49 /* time block I/O */
50 struct start_req_t start_req;
51
52 start_req.ts = bpf_ktime_get_ns();
53 start_req.data_len = BPF_CORE_READ(req, __data_len);
54
55 bpf_map_update_elem(&start, &req, &start_req, 0);
56 return 0;
57 }
58
59 static __always_inline
trace_done(struct request * req)60 int trace_done(struct request *req)
61 {
62 struct val_t *valp, zero = {};
63 struct info_t info = {};
64 struct start_req_t *startp;
65 unsigned int cmd_flags;
66 struct gendisk *disk;
67 struct who_t *whop;
68 u64 delta_us;
69
70 /* fetch timestamp and calculate delta */
71 startp = bpf_map_lookup_elem(&start, &req);
72 if (!startp)
73 return 0; /* missed tracing issue */
74
75 delta_us = (bpf_ktime_get_ns() - startp->ts) / 1000;
76
77 /* setup info_t key */
78 cmd_flags = BPF_CORE_READ(req, cmd_flags);
79
80 disk = get_disk(req);
81 info.major = BPF_CORE_READ(disk, major);
82 info.minor = BPF_CORE_READ(disk, first_minor);
83 info.rwflag = !!((cmd_flags & REQ_OP_MASK) == REQ_OP_WRITE);
84
85 whop = bpf_map_lookup_elem(&whobyreq, &req);
86 if (whop) {
87 info.pid = whop->pid;
88 __builtin_memcpy(&info.name, whop->name, sizeof(info.name));
89 }
90
91 valp = bpf_map_lookup_or_try_init(&counts, &info, &zero);
92
93 if (valp) {
94 /* save stats */
95 valp->us += delta_us;
96 valp->bytes += startp->data_len;
97 valp->io++;
98 }
99
100 bpf_map_delete_elem(&start, &req);
101 bpf_map_delete_elem(&whobyreq, &req);
102
103 return 0;
104 }
105
106 SEC("kprobe/blk_account_io_start")
BPF_KPROBE(blk_account_io_start,struct request * req)107 int BPF_KPROBE(blk_account_io_start, struct request *req)
108 {
109 return trace_start(req);
110 }
111
112 SEC("kprobe/blk_account_io_done")
BPF_KPROBE(blk_account_io_done,struct request * req)113 int BPF_KPROBE(blk_account_io_done, struct request *req)
114 {
115 return trace_done(req);
116 }
117
118 SEC("kprobe/__blk_account_io_start")
BPF_KPROBE(__blk_account_io_start,struct request * req)119 int BPF_KPROBE(__blk_account_io_start, struct request *req)
120 {
121 return trace_start(req);
122 }
123
124 SEC("kprobe/__blk_account_io_done")
BPF_KPROBE(__blk_account_io_done,struct request * req)125 int BPF_KPROBE(__blk_account_io_done, struct request *req)
126 {
127 return trace_done(req);
128 }
129
130 SEC("tp_btf/block_io_start")
BPF_PROG(block_io_start,struct request * req)131 int BPF_PROG(block_io_start, struct request *req)
132 {
133 return trace_start(req);
134 }
135
136 SEC("tp_btf/block_io_done")
BPF_PROG(block_io_done,struct request * req)137 int BPF_PROG(block_io_done, struct request *req)
138 {
139 return trace_done(req);
140 }
141
142 char LICENSE[] SEC("license") = "GPL";
143