// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Microsoft Corporation
//
// Based on tcptracer(8) from BCC by Kinvolk GmbH and
// tcpconnect(8) by Anton Protopopov
#include <vmlinux.h>

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_endian.h>
#include "tcptracer.h"

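/* Optional filters, set from user space before the program is loaded;
 * the defaults (uid -1, pid 0) mean no filtering. */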
const volatile uid_t filter_uid = -1;
const volatile pid_t filter_pid = 0;

/* Define here, because there are conflicts with include files */
#define AF_INET		2
#define AF_INET6	10

/*
 * tcp_set_state() doesn't run in the context of the process that initiated
 * the connection, so we need a TUPLE -> PID map in order to report the
 * right PID with the event.
 */
struct tuple_key_t {
	union {
		__u32 saddr_v4;
		unsigned __int128 saddr_v6;
	};
	union {
		__u32 daddr_v4;
		unsigned __int128 daddr_v6;
	};
	u16 sport;
	u16 dport;
	u32 netns;
};

struct pid_comm_t {
	u64 pid;
	char comm[TASK_COMM_LEN];
	u32 uid;
};

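/* connection tuple -> pid/comm/uid of the task that initiated the connection */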
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct tuple_key_t);
	__type(value, struct pid_comm_t);
} tuplepid SEC(".maps");

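/* thread id -> struct sock *, kept between entry and return of tcp_v{4,6}_connect() */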
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, struct sock *);
} sockets SEC(".maps");

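/* perf buffer used to stream events to user space */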
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");

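/*
 * Fill the connection tuple (netns, addresses, ports) from the socket.
 * Returns false if any component is still zero, i.e. not yet set.
 */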
static __always_inline bool
fill_tuple(struct tuple_key_t *tuple, struct sock *sk, int family)
{
	struct inet_sock *sockp = (struct inet_sock *)sk;

	BPF_CORE_READ_INTO(&tuple->netns, sk, __sk_common.skc_net.net, ns.inum);

	switch (family) {
	case AF_INET:
		BPF_CORE_READ_INTO(&tuple->saddr_v4, sk, __sk_common.skc_rcv_saddr);
		if (tuple->saddr_v4 == 0)
			return false;

		BPF_CORE_READ_INTO(&tuple->daddr_v4, sk, __sk_common.skc_daddr);
		if (tuple->daddr_v4 == 0)
			return false;

		break;
	case AF_INET6:
		BPF_CORE_READ_INTO(&tuple->saddr_v6, sk,
				   __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
		if (tuple->saddr_v6 == 0)
			return false;
		BPF_CORE_READ_INTO(&tuple->daddr_v6, sk,
				   __sk_common.skc_v6_daddr.in6_u.u6_addr32);
		if (tuple->daddr_v6 == 0)
			return false;

		break;
	/* it should not happen, but let's handle this case to be safe */
	default:
		return false;
	}

	BPF_CORE_READ_INTO(&tuple->dport, sk, __sk_common.skc_dport);
	if (tuple->dport == 0)
		return false;

	BPF_CORE_READ_INTO(&tuple->sport, sockp, inet_sport);
	if (tuple->sport == 0)
		return false;

	return true;
}

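/* Copy the tuple plus task metadata into the event sent to user space. */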
static __always_inline void
fill_event(struct tuple_key_t *tuple, struct event *event, __u32 pid,
	   __u32 uid, __u16 family, __u8 type)
{
	event->ts_us = bpf_ktime_get_ns() / 1000;
	event->type = type;
	event->pid = pid;
	event->uid = uid;
	event->af = family;
	event->netns = tuple->netns;
	if (family == AF_INET) {
		event->saddr_v4 = tuple->saddr_v4;
		event->daddr_v4 = tuple->daddr_v4;
	} else {
		event->saddr_v6 = tuple->saddr_v6;
		event->daddr_v6 = tuple->daddr_v6;
	}
	event->sport = tuple->sport;
	event->dport = tuple->dport;
}

/* returns true if the event should be skipped */
static __always_inline bool
filter_event(struct sock *sk, __u32 uid, __u32 pid)
{
	u16 family = BPF_CORE_READ(sk, __sk_common.skc_family);

	if (family != AF_INET && family != AF_INET6)
		return true;

	if (filter_pid && pid != filter_pid)
		return true;

	if (filter_uid != (uid_t) -1 && uid != filter_uid)
		return true;

	return false;
}

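/*
 * Entry half of the connect probes: remember the socket pointer, keyed by
 * thread id, so the matching return probe can find it.
 */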
static __always_inline int
enter_tcp_connect(struct pt_regs *ctx, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	__u64 uid_gid = bpf_get_current_uid_gid();
	__u32 uid = uid_gid;

	if (filter_event(sk, uid, pid))
		return 0;

	bpf_map_update_elem(&sockets, &tid, &sk, 0);
	return 0;
}

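/*
 * Return half of the connect probes: on success, record pid/comm/uid under
 * the connection tuple so that tcp_set_state() can attribute the
 * ESTABLISHED transition to the right task.
 */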
static __always_inline int
exit_tcp_connect(struct pt_regs *ctx, int ret, __u16 family)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = pid_tgid;
	__u64 uid_gid = bpf_get_current_uid_gid();
	__u32 uid = uid_gid;
	struct tuple_key_t tuple = {};
	struct pid_comm_t pid_comm = {};
	struct sock **skpp;
	struct sock *sk;

	skpp = bpf_map_lookup_elem(&sockets, &tid);
	if (!skpp)
		return 0;

	if (ret)
		goto end;

	sk = *skpp;

	if (!fill_tuple(&tuple, sk, family))
		goto end;

	pid_comm.pid = pid;
	pid_comm.uid = uid;
	bpf_get_current_comm(&pid_comm.comm, sizeof(pid_comm.comm));

	bpf_map_update_elem(&tuplepid, &tuple, &pid_comm, 0);

end:
	bpf_map_delete_elem(&sockets, &tid);
	return 0;
}

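/* Entry/return probes for the IPv4 and IPv6 connect paths. */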
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}

SEC("kretprobe/tcp_v4_connect")
int BPF_KRETPROBE(tcp_v4_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, AF_INET);
}

SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
	return enter_tcp_connect(ctx, sk);
}

SEC("kretprobe/tcp_v6_connect")
int BPF_KRETPROBE(tcp_v6_connect_ret, int ret)
{
	return exit_tcp_connect(ctx, ret, AF_INET6);
}

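/* Emit a CLOSE event when an established connection is closed. */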
SEC("kprobe/tcp_close")
int BPF_KPROBE(entry_trace_close, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u64 uid_gid = bpf_get_current_uid_gid();
	__u32 uid = uid_gid;
	struct tuple_key_t tuple = {};
	struct event event = {};
	u16 family;

	if (filter_event(sk, uid, pid))
		return 0;

	/*
	 * Don't generate close events for connections that were never
	 * established in the first place.
	 */
	u8 oldstate = BPF_CORE_READ(sk, __sk_common.skc_state);
	if (oldstate == TCP_SYN_SENT ||
	    oldstate == TCP_SYN_RECV ||
	    oldstate == TCP_NEW_SYN_RECV)
		return 0;

	family = BPF_CORE_READ(sk, __sk_common.skc_family);
	if (!fill_tuple(&tuple, sk, family))
		return 0;

	fill_event(&tuple, &event, pid, uid, family, TCP_EVENT_TYPE_CLOSE);
	bpf_get_current_comm(&event.task, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

	return 0;
}

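/*
 * Connections become ESTABLISHED inside tcp_set_state(), which may not run
 * in the context of the connecting task. Look up the pid/comm/uid stored at
 * connect() time and emit a CONNECT event; on either the ESTABLISHED or the
 * CLOSE transition, drop the tuplepid entry.
 */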
SEC("kprobe/tcp_set_state")
int BPF_KPROBE(enter_tcp_set_state, struct sock *sk, int state)
{
	struct tuple_key_t tuple = {};
	struct event event = {};
	__u16 family;

	if (state != TCP_ESTABLISHED && state != TCP_CLOSE)
		goto end;

	family = BPF_CORE_READ(sk, __sk_common.skc_family);

	if (!fill_tuple(&tuple, sk, family))
		goto end;

	if (state == TCP_CLOSE)
		goto end;

	struct pid_comm_t *p;
	p = bpf_map_lookup_elem(&tuplepid, &tuple);
	if (!p)
		return 0; /* missed entry */

	fill_event(&tuple, &event, p->pid, p->uid, family, TCP_EVENT_TYPE_CONNECT);
	__builtin_memcpy(&event.task, p->comm, sizeof(event.task));

	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

end:
	bpf_map_delete_elem(&tuplepid, &tuple);

	return 0;
}

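/*
 * inet_csk_accept() returns the newly accepted socket; emit an ACCEPT event
 * for it.
 */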
SEC("kretprobe/inet_csk_accept")
int BPF_KRETPROBE(exit_inet_csk_accept, struct sock *sk)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u64 uid_gid = bpf_get_current_uid_gid();
	__u32 uid = uid_gid;
	__u16 sport, family;
	struct event event = {};

	if (!sk)
		return 0;

	if (filter_event(sk, uid, pid))
		return 0;

	family = BPF_CORE_READ(sk, __sk_common.skc_family);
	sport = BPF_CORE_READ(sk, __sk_common.skc_num);

	struct tuple_key_t t = {};
	fill_tuple(&t, sk, family);
	t.sport = bpf_ntohs(sport);
	/*
	 * Do not send the event if the IP address is 0.0.0.0 or the port is 0.
	 * Checking the v6 union members also covers the v4 case, since the
	 * remaining bytes stay zero-initialized.
	 */
	if (t.saddr_v6 == 0 || t.daddr_v6 == 0 || t.dport == 0 || t.sport == 0)
		return 0;

	fill_event(&t, &event, pid, uid, family, TCP_EVENT_TYPE_ACCEPT);

	bpf_get_current_comm(&event.task, sizeof(event.task));
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

	return 0;
}

char LICENSE[] SEC("license") = "GPL";