// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "runqlat.h"
#include "bits.bpf.h"
#include "maps.bpf.h"
#include "core_fixes.bpf.h"

#define MAX_ENTRIES	10240
#define TASK_RUNNING	0

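/* Tunables; the userspace loader rewrites these read-only globals before the object is loaded. */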
const volatile bool filter_cg = false;
const volatile bool targ_per_process = false;
const volatile bool targ_per_thread = false;
const volatile bool targ_per_pidns = false;
const volatile bool targ_ms = false;
const volatile pid_t targ_tgid = 0;

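/* Single-slot cgroup array consulted by bpf_current_task_under_cgroup() when filter_cg is set. */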
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");

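/* pid -> timestamp (ns) of the task's most recent enqueue onto a run queue */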
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");

static struct hist zero;

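/* histogram key (tgid, pid, pid-namespace inum, or -1) -> latency histogram */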
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct hist);
} hists SEC(".maps");

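/* Record the enqueue timestamp for @pid, honoring the optional tgid filter. */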
static int trace_enqueue(u32 tgid, u32 pid)
{
	u64 ts;

	if (!pid)
		return 0;
	if (targ_tgid && targ_tgid != tgid)
		return 0;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
	return 0;
}

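/* Return the inode number of @task's pid namespace. */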
static unsigned int pid_namespace(struct task_struct *task)
{
	struct pid *pid;
	unsigned int level;
	struct upid upid;
	unsigned int inum;

	/*  get the pid namespace by following task_active_pid_ns(),
	 *  pid->numbers[pid->level].ns
	 */
	pid = BPF_CORE_READ(task, thread_pid);
	level = BPF_CORE_READ(pid, level);
	bpf_core_read(&upid, sizeof(upid), &pid->numbers[level]);
	inum = BPF_CORE_READ(upid.ns, ns.inum);

	return inum;
}

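/*
 * On each context switch: if @prev was preempted while still runnable, stamp
 * it as re-enqueued; then look up @next's enqueue timestamp, compute how long
 * it waited on the run queue, and add the latency to the histogram selected
 * by the aggregation mode (per process, per thread, per pid namespace, or a
 * single global histogram).
 */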
static int handle_switch(bool preempt, struct task_struct *prev, struct task_struct *next)
{
	struct hist *histp;
	u64 *tsp, slot;
	u32 pid, hkey;
	s64 delta;
	u64 udelta;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	if (get_task_state(prev) == TASK_RUNNING)
		trace_enqueue(BPF_CORE_READ(prev, tgid), BPF_CORE_READ(prev, pid));

	pid = BPF_CORE_READ(next, pid);

	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		return 0;
	delta = bpf_ktime_get_ns() - *tsp;
	if (delta < 0)
		goto cleanup;
	udelta = (u64)delta;

	if (targ_per_process)
		hkey = BPF_CORE_READ(next, tgid);
	else if (targ_per_thread)
		hkey = pid;
	else if (targ_per_pidns)
		hkey = pid_namespace(next);
	else
		hkey = -1;
	histp = bpf_map_lookup_or_try_init(&hists, &hkey, &zero);
	if (!histp)
		goto cleanup;
	if (!histp->comm[0])
		bpf_probe_read_kernel_str(&histp->comm, sizeof(histp->comm),
					  next->comm);
	if (targ_ms)
		udelta /= 1000000U;
	else
		udelta /= 1000U;
	slot = log2l(udelta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&start, &pid);
	return 0;
}

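/*
 * BTF-enabled tracepoint programs: arguments arrive typed, so task fields can
 * be dereferenced directly. The raw_tp variants further below are the
 * fallback for older kernels; the userspace loader is expected to attach only
 * one of the two sets.
 */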
SEC("tp_btf/sched_wakeup")
int BPF_PROG(sched_wakeup, struct task_struct *p)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	return trace_enqueue(p->tgid, p->pid);
}

SEC("tp_btf/sched_wakeup_new")
int BPF_PROG(sched_wakeup_new, struct task_struct *p)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	return trace_enqueue(p->tgid, p->pid);
}

SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_switch(preempt, prev, next);
}

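/*
 * Raw tracepoint fallbacks: arguments are untyped here, so task fields are
 * read with BPF_CORE_READ() instead of direct dereferences.
 */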
SEC("raw_tp/sched_wakeup")
int BPF_PROG(handle_sched_wakeup, struct task_struct *p)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
}

SEC("raw_tp/sched_wakeup_new")
int BPF_PROG(handle_sched_wakeup_new, struct task_struct *p)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
}

SEC("raw_tp/sched_switch")
int BPF_PROG(handle_sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_switch(preempt, prev, next);
}

char LICENSE[] SEC("license") = "GPL";