xref: /aosp_15_r20/external/bcc/libbpf-tools/bindsnoop.bpf.c (revision 387f9dfdfa2baef462e92476d413c7bc2470293e)
1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2 /* Copyright (c) 2021 Hengqi Chen */
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_core_read.h>
6 #include <bpf/bpf_tracing.h>
7 #include <bpf/bpf_endian.h>
8 #include "bindsnoop.h"
9 
#define MAX_ENTRIES	10240
#define MAX_PORTS	1024

/* Tunables written by userspace before the skeleton is loaded
 * (const volatile makes them .rodata, frozen at load time). */
const volatile bool filter_cg = false;		/* restrict tracing to the cgroup in cgroup_map */
const volatile pid_t target_pid = 0;		/* trace only this PID; 0 = trace all */
const volatile bool ignore_errors = true;	/* drop binds whose return value is non-zero */
const volatile bool filter_by_port = false;	/* report only ports present in the `ports` map */
17 
/* Single-slot cgroup array; userspace installs the target cgroup here
 * when filter_cg is set, and the probes test membership against slot 0. */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");
24 
/* In-flight binds: thread id -> socket pointer captured at
 * inet_bind/inet6_bind entry, consumed by the matching kretprobe. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, struct socket *);
} sockets SEC(".maps");
31 
/* Port allow-list used when filter_by_port is set; populated by
 * userspace with key == value == port number (host byte order —
 * probe_exit looks up the bpf_ntohs()-converted port). */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_PORTS);
	__type(key, __u16);
	__type(value, __u16);
} ports SEC(".maps");
38 
/* Perf event array for streaming struct bind_event records to userspace;
 * libbpf sizes it to the number of online CPUs at load time. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");
44 
probe_entry(struct pt_regs * ctx,struct socket * socket)45 static int probe_entry(struct pt_regs *ctx, struct socket *socket)
46 {
47 	__u64 pid_tgid = bpf_get_current_pid_tgid();
48 	__u32 pid = pid_tgid >> 32;
49 	__u32 tid = (__u32)pid_tgid;
50 
51 	if (target_pid && target_pid != pid)
52 		return 0;
53 
54 	bpf_map_update_elem(&sockets, &tid, &socket, BPF_ANY);
55 	return 0;
56 };
57 
probe_exit(struct pt_regs * ctx,short ver)58 static int probe_exit(struct pt_regs *ctx, short ver)
59 {
60 	__u64 pid_tgid = bpf_get_current_pid_tgid();
61 	__u32 pid = pid_tgid >> 32;
62 	__u32 tid = (__u32)pid_tgid;
63 	struct socket **socketp, *socket;
64 	struct inet_sock *inet_sock;
65 	struct sock *sock;
66 	union bind_options opts;
67 	struct bind_event event = {};
68 	__u16 sport = 0, *port;
69 	int ret;
70 
71 	socketp = bpf_map_lookup_elem(&sockets, &tid);
72 	if (!socketp)
73 		return 0;
74 
75 	ret = PT_REGS_RC(ctx);
76 	if (ignore_errors && ret != 0)
77 		goto cleanup;
78 
79 	socket = *socketp;
80 	sock = BPF_CORE_READ(socket, sk);
81 	inet_sock = (struct inet_sock *)sock;
82 
83 	sport = bpf_ntohs(BPF_CORE_READ(inet_sock, inet_sport));
84 	port = bpf_map_lookup_elem(&ports, &sport);
85 	if (filter_by_port && !port)
86 		goto cleanup;
87 
88 	opts.fields.freebind             = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, freebind);
89 	opts.fields.transparent          = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, transparent);
90 	opts.fields.bind_address_no_port = BPF_CORE_READ_BITFIELD_PROBED(inet_sock, bind_address_no_port);
91 	opts.fields.reuseaddress         = BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuse);
92 	opts.fields.reuseport            = BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuseport);
93 	event.opts = opts.data;
94 	event.ts_us = bpf_ktime_get_ns() / 1000;
95 	event.pid = pid;
96 	event.port = sport;
97 	event.bound_dev_if = BPF_CORE_READ(sock, __sk_common.skc_bound_dev_if);
98 	event.ret = ret;
99 	event.proto = BPF_CORE_READ_BITFIELD_PROBED(sock, sk_protocol);
100 	bpf_get_current_comm(&event.task, sizeof(event.task));
101 	if (ver == 4) {
102 		event.ver = ver;
103 		bpf_probe_read_kernel(&event.addr, sizeof(event.addr), &inet_sock->inet_saddr);
104 	} else { /* ver == 6 */
105 		event.ver = ver;
106 		bpf_probe_read_kernel(&event.addr, sizeof(event.addr), sock->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
107 	}
108 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
109 
110 cleanup:
111 	bpf_map_delete_elem(&sockets, &tid);
112 	return 0;
113 }
114 
115 SEC("kprobe/inet_bind")
BPF_KPROBE(ipv4_bind_entry,struct socket * socket)116 int BPF_KPROBE(ipv4_bind_entry, struct socket *socket)
117 {
118 	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
119 		return 0;
120 
121 	return probe_entry(ctx, socket);
122 }
123 
124 SEC("kretprobe/inet_bind")
BPF_KRETPROBE(ipv4_bind_exit)125 int BPF_KRETPROBE(ipv4_bind_exit)
126 {
127 	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
128 		return 0;
129 
130 	return probe_exit(ctx, 4);
131 }
132 
133 SEC("kprobe/inet6_bind")
BPF_KPROBE(ipv6_bind_entry,struct socket * socket)134 int BPF_KPROBE(ipv6_bind_entry, struct socket *socket)
135 {
136 	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
137 		return 0;
138 
139 	return probe_entry(ctx, socket);
140 }
141 
142 SEC("kretprobe/inet6_bind")
BPF_KRETPROBE(ipv6_bind_exit)143 int BPF_KRETPROBE(ipv6_bind_exit)
144 {
145 	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
146 		return 0;
147 
148 	return probe_exit(ctx, 6);
149 }
150 
/* Dual license so GPL-only BPF helpers remain available to this program. */
char LICENSE[] SEC("license") = "Dual BSD/GPL";
152