// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "cpudist.h"
#include "bits.bpf.h"
#include "core_fixes.bpf.h"

#define TASK_RUNNING	0

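/*
 * Tunables below are const volatile so they land in .rodata; the userspace
 * loader fills them in through the BPF skeleton before the object is loaded.
 */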
const volatile bool filter_cg = false;
const volatile bool targ_per_process = false;
const volatile bool targ_per_thread = false;
const volatile bool targ_offcpu = false;
const volatile bool targ_ms = false;
const volatile pid_t targ_tgid = -1;

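/* Optional cgroup filter; userspace stores the target cgroup at index 0. */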
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");

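/* Per-thread timestamp of when the currently measured span began. */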
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");

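/* Zero-filled template used to initialize new histogram entries. */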
static struct hist initial_hist;

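/* log2 histograms keyed by tgid, pid, or -1, depending on aggregation mode. */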
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, struct hist);
} hists SEC(".maps");

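/*
 * Remember when @pid entered the measured state (got on CPU, or went off
 * CPU in off-CPU mode), unless the process filter excludes it.
 */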
static __always_inline void store_start(u32 tgid, u32 pid, u64 ts)
{
	if (targ_tgid != -1 && targ_tgid != tgid)
		return;
	bpf_map_update_elem(&start, &pid, &ts, 0);
}

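/*
 * Close out the span opened by store_start(): compute how long @pid spent
 * in the measured state, convert to the requested unit, and bump the
 * corresponding log2 histogram slot.
 */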
static __always_inline void update_hist(struct task_struct *task,
					u32 tgid, u32 pid, u64 ts)
{
	u64 delta, *tsp, slot;
	struct hist *histp;
	u32 id;

	if (targ_tgid != -1 && targ_tgid != tgid)
		return;

	tsp = bpf_map_lookup_elem(&start, &pid);
	if (!tsp || ts < *tsp)
		return;

	if (targ_per_process)
		id = tgid;
	else if (targ_per_thread)
		id = pid;
	else
		id = -1;	/* single global histogram */
	histp = bpf_map_lookup_elem(&hists, &id);
	if (!histp) {
		bpf_map_update_elem(&hists, &id, &initial_hist, 0);
		histp = bpf_map_lookup_elem(&hists, &id);
		if (!histp)
			return;
		BPF_CORE_READ_STR_INTO(&histp->comm, task, comm);
	}
	delta = ts - *tsp;
	if (targ_ms)
		delta /= 1000000;	/* ns -> ms */
	else
		delta /= 1000;		/* ns -> us */
	slot = log2l(delta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);
}

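/*
 * Shared sched_switch logic. In off-CPU mode, prev begins an off-CPU span
 * and next ends one; in the default on-CPU mode, next begins an on-CPU span
 * and prev's span is accounted if it was switched out while still runnable.
 */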
static int handle_switch(struct task_struct *prev, struct task_struct *next)
{
	u32 prev_tgid = BPF_CORE_READ(prev, tgid), prev_pid = BPF_CORE_READ(prev, pid);
	u32 tgid = BPF_CORE_READ(next, tgid), pid = BPF_CORE_READ(next, pid);
	u64 ts = bpf_ktime_get_ns();

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	if (targ_offcpu) {
		/* prev leaves the CPU now; next just finished an off-CPU span */
		store_start(prev_tgid, prev_pid, ts);
		update_hist(next, tgid, pid, ts);
	} else {
		/* prev was still runnable, i.e. preempted rather than blocked */
		if (get_task_state(prev) == TASK_RUNNING)
			update_hist(prev, prev_tgid, prev_pid, ts);
		store_start(tgid, pid, ts);
	}
	return 0;
}

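/*
 * Two flavors of the same program: tp_btf for kernels with BTF-typed
 * tracepoints, raw_tp as a fallback. The userspace loader is expected to
 * enable only one of the two at load time.
 */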
SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch_btf, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	return handle_switch(prev, next);
}

SEC("raw_tp/sched_switch")
int BPF_PROG(sched_switch_tp, bool preempt, struct task_struct *prev,
	     struct task_struct *next)
{
	return handle_switch(prev, next);
}

char LICENSE[] SEC("license") = "GPL";