// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <vmlinux.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "runqslower.h"
#include "core_fixes.bpf.h"

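/* vmlinux.h carries only BTF-derived type information, not kernel
 * #defines, so the TASK_RUNNING constant is replicated here. */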
#define TASK_RUNNING	0

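/* Filter parameters; these const volatile globals land in .rodata and
 * are filled in by the userspace loader before the object is loaded. */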
const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
const volatile pid_t targ_tgid = 0;

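/* pid -> enqueue timestamp (ns), written on wakeup or preemption */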
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10240);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");

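/* perf buffer for streaming struct event records to userspace */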
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

/* record enqueue timestamp */
static int trace_enqueue(u32 tgid, u32 pid)
{
	u64 ts;

	if (!pid)
		return 0;
	if (targ_tgid && targ_tgid != tgid)
		return 0;
	if (targ_pid && targ_pid != pid)
		return 0;

	ts = bpf_ktime_get_ns();
	bpf_map_update_elem(&start, &pid, &ts, 0);
	return 0;
}

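/* Common switch handler: if prev was preempted while still runnable,
 * record its enqueue time, then compute how long next sat on the run
 * queue and emit an event when the delay exceeds min_us. */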
static int handle_switch(void *ctx, struct task_struct *prev, struct task_struct *next)
{
	struct event event = {};
	u64 *tsp, delta_us;
	u32 pid;

	/* ivcsw: treat like an enqueue event and store timestamp */
	if (get_task_state(prev) == TASK_RUNNING)
		trace_enqueue(BPF_CORE_READ(prev, tgid), BPF_CORE_READ(prev, pid));

	pid = BPF_CORE_READ(next, pid);

	/* fetch timestamp and calculate delta */
	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp)
		return 0;   /* missed enqueue */

	delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
	if (min_us && delta_us <= min_us)
		return 0;

	event.pid = pid;
	event.prev_pid = BPF_CORE_READ(prev, pid);
	event.delta_us = delta_us;
	bpf_probe_read_kernel_str(&event.task, sizeof(event.task), next->comm);
	bpf_probe_read_kernel_str(&event.prev_task, sizeof(event.prev_task), prev->comm);

	/* output */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

	bpf_map_delete_elem(&start, &pid);
	return 0;
}

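/* BTF-enabled tracepoint variants: arguments are BTF-typed, so fields
 * such as p->tgid can be dereferenced directly. */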
81 SEC("tp_btf/sched_wakeup")
BPF_PROG(sched_wakeup,struct task_struct * p)82 int BPF_PROG(sched_wakeup, struct task_struct *p)
83 {
84 	return trace_enqueue(p->tgid, p->pid);
85 }
86 
87 SEC("tp_btf/sched_wakeup_new")
BPF_PROG(sched_wakeup_new,struct task_struct * p)88 int BPF_PROG(sched_wakeup_new, struct task_struct *p)
89 {
90 	return trace_enqueue(p->tgid, p->pid);
91 }
92 
93 SEC("tp_btf/sched_switch")
BPF_PROG(sched_switch,bool preempt,struct task_struct * prev,struct task_struct * next)94 int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
95 {
96 	return handle_switch(ctx, prev, next);
97 }
98 
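/* Raw tracepoint fallbacks for kernels without tp_btf support: arguments
 * are untyped here, so task fields go through BPF_CORE_READ(). Presumably
 * the userspace loader enables only one of the two program sets. */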
99 SEC("raw_tp/sched_wakeup")
BPF_PROG(handle_sched_wakeup,struct task_struct * p)100 int BPF_PROG(handle_sched_wakeup, struct task_struct *p)
101 {
102 	return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
103 }
104 
105 SEC("raw_tp/sched_wakeup_new")
BPF_PROG(handle_sched_wakeup_new,struct task_struct * p)106 int BPF_PROG(handle_sched_wakeup_new, struct task_struct *p)
107 {
108 	return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
109 }
110 
111 SEC("raw_tp/sched_switch")
BPF_PROG(handle_sched_switch,bool preempt,struct task_struct * prev,struct task_struct * next)112 int BPF_PROG(handle_sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
113 {
114 	return handle_switch(ctx, prev, next);
115 }
116 
117 char LICENSE[] SEC("license") = "GPL";
118