/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Hengqi Chen */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
#include "solisten.h"

#define MAX_ENTRIES	10240
#define AF_INET		2
#define AF_INET6	10

/* Trace only this PID when non-zero; set from userspace before load. */
const volatile pid_t target_pid = 0;

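/*
 * struct event is defined in solisten.h (not shown here). Based on the
 * fields accessed below, it is assumed to carry roughly:
 *
 *	struct event {
 *		__u32 addr[4];		// IPv4 in addr[0], or full IPv6 address
 *		__u32 pid;
 *		__u32 proto;		// (address family << 16) | socket type
 *		int backlog;
 *		int ret;
 *		__u16 port;		// listening port, host byte order
 *		char task[TASK_COMM_LEN];
 *	};
 *
 * Treat this as a sketch; the authoritative layout lives in solisten.h.
 */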
/* In-flight listen() calls, keyed by thread ID: filled at kprobe entry and
 * completed/emitted at kretprobe return. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, struct event);
} values SEC(".maps");

/* Perf event array used to push completed events to userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

/* Populate the protocol, port, address and task name fields from the socket. */
static void fill_event(struct event *event, struct socket *sock)
{
	__u16 family, type;
	struct sock *sk;
	struct inet_sock *inet;

	sk = BPF_CORE_READ(sock, sk);
	inet = (struct inet_sock *)sk;
	family = BPF_CORE_READ(sk, __sk_common.skc_family);
	type = BPF_CORE_READ(sock, type);

	/* Pack address family and socket type into a single field. */
	event->proto = ((__u32)family << 16) | type;
	event->port = bpf_ntohs(BPF_CORE_READ(inet, inet_sport));
	if (family == AF_INET)
		event->addr[0] = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
	else if (family == AF_INET6)
		BPF_CORE_READ_INTO(event->addr, sk, __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	bpf_get_current_comm(event->task, sizeof(event->task));
}

/* Entry probe: stash the event keyed by thread ID so the return probe can
 * attach the return value before emitting it. */
SEC("kprobe/inet_listen")
int BPF_KPROBE(inet_listen_entry, struct socket *sock, int backlog)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = (__u32)pid_tgid;
	struct event event = {};

	if (target_pid && target_pid != pid)
		return 0;

	fill_event(&event, sock);
	event.pid = pid;
	event.backlog = backlog;
	bpf_map_update_elem(&values, &tid, &event, BPF_ANY);
	return 0;
}

/* Return probe: complete the stashed event with the return value, emit it
 * through the perf buffer, and drop the map entry. */
SEC("kretprobe/inet_listen")
int BPF_KRETPROBE(inet_listen_exit, int ret)
{
	__u32 tid = bpf_get_current_pid_tgid();
	struct event *eventp;

	eventp = bpf_map_lookup_elem(&values, &tid);
	if (!eventp)
		return 0;

	eventp->ret = ret;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, eventp, sizeof(*eventp));
	bpf_map_delete_elem(&values, &tid);
	return 0;
}

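/*
 * Alternative single-program path for kernels with fexit support: the socket
 * arguments and the return value are both available in one hook, so no map
 * round-trip is needed. The accompanying userspace loader is presumably
 * expected to enable either this program or the kprobe/kretprobe pair above,
 * not both, to avoid duplicate events (an assumption about the loader, which
 * is not part of this file).
 */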
SEC("fexit/inet_listen")
int BPF_PROG(inet_listen_fexit, struct socket *sock, int backlog, int ret)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	struct event event = {};

	if (target_pid && target_pid != pid)
		return 0;

	fill_event(&event, sock);
	event.pid = pid;
	event.backlog = backlog;
	event.ret = ret;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
	return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
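
/*
 * Minimal userspace consumer sketch (not part of this object file). It
 * assumes a libbpf skeleton generated from this program, e.g. via
 * `bpftool gen skeleton solisten.bpf.o > solisten.skel.h`, so the
 * solisten_bpf__* names and obj->maps.events below follow the usual
 * skeleton naming conventions and are assumptions, not code taken from
 * this repository.
 *
 *	#include <stdio.h>
 *	#include <bpf/libbpf.h>
 *	#include "solisten.h"
 *	#include "solisten.skel.h"
 *
 *	static void handle_event(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		const struct event *e = data;
 *
 *		printf("pid=%u comm=%s port=%d backlog=%d ret=%d\n",
 *		       e->pid, e->task, e->port, e->backlog, e->ret);
 *	}
 *
 *	int main(void)
 *	{
 *		struct perf_buffer *pb = NULL;
 *		struct solisten_bpf *obj;
 *		int err;
 *
 *		obj = solisten_bpf__open_and_load();
 *		if (!obj)
 *			return 1;
 *		err = solisten_bpf__attach(obj);
 *		if (err)
 *			goto cleanup;
 *		pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 16,
 *				      handle_event, NULL, NULL, NULL);
 *		if (!pb) {
 *			err = 1;
 *			goto cleanup;
 *		}
 *		while ((err = perf_buffer__poll(pb, 100)) >= 0)
 *			;
 *	cleanup:
 *		perf_buffer__destroy(pb);
 *		solisten_bpf__destroy(obj);
 *		return err != 0;
 *	}
 */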