xref: /aosp_15_r20/external/bcc/libbpf-tools/offcputime.bpf.c (revision 387f9dfdfa2baef462e92476d413c7bc2470293e)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2021 Wenbo Zhang
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_core_read.h>
6 #include <bpf/bpf_tracing.h>
7 #include "offcputime.h"
8 #include "core_fixes.bpf.h"
9 
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define MAX_ENTRIES		10240

/*
 * Filter knobs written by the userspace loader before the skeleton is
 * loaded ("const volatile" places them in read-only map data).
 * Defaults mean "no filtering".
 */
const volatile bool kernel_threads_only = false;	/* record kernel threads only */
const volatile bool user_threads_only = false;		/* record user threads only */
const volatile __u64 max_block_ns = -1;			/* upper bound on blocked time (-1 = no limit) */
const volatile __u64 min_block_ns = 1;			/* lower bound on blocked time */
const volatile pid_t targ_tgid = -1;			/* trace only this process (-1 = all) */
const volatile pid_t targ_pid = -1;			/* trace only this thread (-1 = all) */
const volatile long state = -1;				/* trace only tasks in this state (-1 = any) */
20 
/*
 * Per-thread record stored in the "start" map when a thread goes
 * off-CPU: the switch-out timestamp plus the aggregation key that
 * was captured at that moment (pid/tgid/stack ids).
 */
struct internal_key {
	u64 start_ts;		/* bpf_ktime_get_ns() at switch-out */
	struct key_t key;	/* aggregation key, defined in offcputime.h */
};
25 
/*
 * pid (thread id) -> internal_key. Written when a thread is switched
 * out, consumed and deleted when the same thread is switched back in.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, struct internal_key);
	__uint(max_entries, MAX_ENTRIES);
} start SEC(".maps");
32 
/*
 * Kernel and user stack traces, referenced by the stack ids stored in
 * key_t.
 * NOTE(review): value_size and max_entries are not set here; they are
 * presumably filled in by the userspace loader before load (both are
 * required for STACK_TRACE maps) — confirm against the loader.
 */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(key_size, sizeof(u32));
} stackmap SEC(".maps");
37 
/*
 * Aggregated results read by userspace:
 * key_t -> accumulated off-CPU time and the task's comm.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct key_t);
	__type(value, struct val_t);
	__uint(max_entries, MAX_ENTRIES);
} info SEC(".maps");
44 
allow_record(struct task_struct * t)45 static bool allow_record(struct task_struct *t)
46 {
47 	if (targ_tgid != -1 && targ_tgid != t->tgid)
48 		return false;
49 	if (targ_pid != -1 && targ_pid != t->pid)
50 		return false;
51 	if (user_threads_only && t->flags & PF_KTHREAD)
52 		return false;
53 	else if (kernel_threads_only && !(t->flags & PF_KTHREAD))
54 		return false;
55 	if (state != -1 && get_task_state(t) != state)
56 		return false;
57 	return true;
58 }
59 
SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	struct internal_key *i_keyp, i_key;
	struct val_t *valp, val;
	s64 delta;
	u64 udelta;
	u32 pid;

	/* Part 1: @prev is going off-CPU — record when and where (stacks). */
	if (allow_record(prev)) {
		pid = prev->pid;
		/* To distinguish idle threads of different cores */
		if (!pid)
			pid = bpf_get_smp_processor_id();
		i_key.key.pid = pid;
		i_key.key.tgid = prev->tgid;
		i_key.start_ts = bpf_ktime_get_ns();

		/* Kernel threads have no user stack to capture. */
		if (prev->flags & PF_KTHREAD)
			i_key.key.user_stack_id = -1;
		else
			i_key.key.user_stack_id =
				bpf_get_stackid(ctx, &stackmap,
						BPF_F_USER_STACK);
		i_key.key.kern_stack_id = bpf_get_stackid(ctx, &stackmap, 0);
		bpf_map_update_elem(&start, &pid, &i_key, 0);
		bpf_probe_read_kernel_str(&val.comm, sizeof(prev->comm), prev->comm);
		val.delta = 0;
		/* BPF_NOEXIST keeps the already-accumulated delta for a known key. */
		bpf_map_update_elem(&info, &i_key.key, &val, BPF_NOEXIST);
	}

	/*
	 * Part 2: @next is going on-CPU — if we recorded its switch-out,
	 * charge the elapsed off-CPU time to its key.
	 * NOTE(review): idle threads were stored under their CPU id above,
	 * but are looked up here under next->pid (0) — this looks
	 * asymmetric; confirm it is intended.
	 */
	pid = next->pid;
	i_keyp = bpf_map_lookup_elem(&start, &pid);
	if (!i_keyp)
		return 0;
	/* Guard against a negative interval (e.g. clock anomalies). */
	delta = (s64)(bpf_ktime_get_ns() - i_keyp->start_ts);
	if (delta < 0)
		goto cleanup;
	udelta = (u64)delta;
	/*
	 * ns -> us before the threshold check.
	 * NOTE(review): udelta is in microseconds here but is compared
	 * against *_ns-named thresholds — confirm the userspace loader
	 * passes min/max in the matching unit.
	 */
	udelta /= 1000U;
	if (udelta < min_block_ns || udelta > max_block_ns)
		goto cleanup;
	valp = bpf_map_lookup_elem(&info, &i_keyp->key);
	if (!valp)
		goto cleanup;
	__sync_fetch_and_add(&valp->delta, udelta);

cleanup:
	/* Always drop the start record once the thread is back on-CPU. */
	bpf_map_delete_elem(&start, &pid);
	return 0;
}
111 
/* GPL-compatible license declaration, required to use GPL-only BPF helpers. */
char LICENSE[] SEC("license") = "GPL";
113