1 /*author: https://github.com/agentzex
2 Licensed under the Apache License, Version 2.0 (the "License")
3
4 tcp_mon_block.c - uses netlink TC, kernel tracepoints and kprobes to monitor outgoing connections from given PIDs
5 and block connections to all addresses initiated from them (acting like an in-process firewall), unless they are listed in allow_list
6 */
7
8 #include <uapi/linux/bpf.h>
9 #include <uapi/linux/ptrace.h>
10 #include <linux/tcp.h>
11 #include <net/sock.h>
12 #include <net/inet_sock.h>
13 #include <linux/in.h>
14 #include <linux/if_ether.h>
15 #include <linux/if_packet.h>
16 #include <uapi/linux/if_ether.h>
17 #include <uapi/linux/ip.h>
18 #include <linux/tcp.h>
19 #include <uapi/linux/pkt_cls.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22
23
/* Record describing one monitored outgoing TCP connection.
 * Used both as the value stored in monitored_connections and as the payload
 * submitted to user space on the blocked_events perf buffer, so the field
 * layout must stay in sync with the user-space reader — do not reorder. */
typedef struct
{
    u32 src_ip;               /* IPv4 source address (network byte order)      */
    u16 src_port;             /* TCP source port (host byte order)             */
    u32 dst_ip;               /* IPv4 destination address (network byte order) */
    u16 dst_port;             /* TCP destination port (host byte order)        */
    u32 pid;                  /* TGID of the process that opened the connection */
    u8 tcp_flags;             /* raw TCP flag byte captured at egress time     */
    char comm[TASK_COMM_LEN]; /* command name of the initiating process        */
} full_packet;
34
35
/* Diagnostic event emitted on the verbose_events perf buffer when
 * VERBOSE_OUTPUT is enabled.  state codes used in this file:
 *   1 = connection added to monitored_connections (will be blocked)
 *   2 = destination found in allow_list (permitted)
 *   3 = monitored connection removed after its socket closed */
typedef struct
{
    u8 state;                 /* event state code, see above                   */
    u32 src_ip;               /* IPv4 source address (network byte order)      */
    u16 src_port;             /* TCP source port (host byte order)             */
    u32 dst_ip;               /* IPv4 destination address (network byte order) */
    u16 dst_port;             /* TCP destination port (host byte order)        */
    u32 pid;                  /* TGID of the connecting process                */
    char comm[TASK_COMM_LEN]; /* command name of the connecting process        */
} verbose_event;
46
47
/* 4-tuple key identifying one TCP connection in monitored_connections.
 * Addresses are stored in network byte order and ports in host byte order;
 * every producer (kprobe on connect, TC egress hook, sock-state tracepoint)
 * must build the key the same way or lookups will not match. */
typedef struct
{
    u32 src_ip;
    u16 src_port;
    u32 dst_ip;
    u16 dst_port;
} key_hash;
55
56
/* Connections (keyed by 4-tuple) initiated by a monitored PID to a
 * non-allowed destination; the TC egress hook drops packets matching a key. */
BPF_HASH(monitored_connections, key_hash, full_packet);
/* Destination IPv4 addresses (network byte order) exempt from blocking;
 * populated from user space. */
BPF_HASH(allow_list, u32, u32);
/* PIDs (TGIDs) whose outgoing connections should be monitored and blocked;
 * populated from user space. */
BPF_HASH(pid_list, u32, u32);
BPF_PERF_OUTPUT(blocked_events);  /* full_packet records for dropped packets  */
BPF_PERF_OUTPUT(verbose_events);  /* verbose_event records (VERBOSE_OUTPUT)   */


/* Byte 13 of the TCP header holds all flag bits (CWR..FIN) in one byte. */
#ifndef tcp_flag_byte
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])
#endif


/* NOTE(review): presumably text-substituted by the user-space loader before
 * compilation — confirm; a plain static bool cannot be toggled at runtime. */
static bool VERBOSE_OUTPUT = false;
70
71
tcp_header_bound_check(struct tcphdr * tcp,void * data_end)72 static __always_inline int tcp_header_bound_check(struct tcphdr* tcp, void* data_end)
73 {
74 if ((void *)tcp + sizeof(*tcp) > data_end)
75 {
76 return -1;
77 }
78
79 return 0;
80 }
81
82
/* Fill a verbose_event with the connection 4-tuple, the owning PID, the
 * state code, and the current task's command name (via BPF helper). */
static void make_verbose_event(verbose_event *v, u32 src_ip, u32 dst_ip, u16 src_port, u16 dst_port, u32 pid, u8 state)
{
    v->state = state;
    v->pid = pid;
    v->src_ip = src_ip;
    v->src_port = src_port;
    v->dst_ip = dst_ip;
    v->dst_port = dst_port;
    bpf_get_current_comm(&v->comm, sizeof(v->comm));
}
93
94
handle_egress(struct __sk_buff * ctx)95 int handle_egress(struct __sk_buff *ctx)
96 {
97 void* data_end = (void*)(long)ctx->data_end;
98 void* data = (void*)(long)ctx->data;
99 struct ethhdr *eth = data;
100 struct iphdr *ip = data + sizeof(*eth);
101 struct tcphdr *tcp;
102 key_hash key = {};
103
104 /* length check */
105 if (data + sizeof(*eth) + sizeof(*ip) > data_end)
106 {
107 return TC_ACT_OK;
108 }
109
110 if (eth->h_proto != htons(ETH_P_IP))
111 {
112 return TC_ACT_OK;
113 }
114
115 if (ip->protocol != IPPROTO_TCP)
116 {
117 return TC_ACT_OK;
118 }
119
120 tcp = (void *)ip + sizeof(*ip);
121 if (tcp_header_bound_check(tcp, data_end))
122 {
123 return TC_ACT_OK;
124 }
125
126 u8 tcpflags = ((u_int8_t *)tcp)[13];
127 u16 src_port = bpf_ntohs(tcp->source);
128 u16 dst_port = bpf_ntohs(tcp->dest);
129
130 key.src_ip = ip->saddr;
131 key.src_port = src_port;
132 key.dst_ip = ip->daddr;
133 key.dst_port = dst_port;
134
135 full_packet *packet_value;
136 packet_value = monitored_connections.lookup(&key);
137 if (packet_value != 0)
138 {
139 packet_value->tcp_flags = tcpflags;
140 blocked_events.perf_submit(ctx, packet_value, sizeof(full_packet));
141 return TC_ACT_SHOT;
142 }
143
144 return TC_ACT_OK;
145 }
146
147
148 // Removing the entry from monitored_connections when the socket closes after failed connection
TRACEPOINT_PROBE(sock,inet_sock_set_state)149 TRACEPOINT_PROBE(sock, inet_sock_set_state)
150 {
151 if (args->protocol != IPPROTO_TCP)
152 {
153 return 0;
154 }
155
156 if (args->newstate != TCP_CLOSE && args->newstate != TCP_CLOSE_WAIT)
157 {
158 return 0;
159 }
160
161 if (args->family == AF_INET)
162 {
163 key_hash key = {};
164 struct sock *sk = (struct sock *)args->skaddr;
165
166 key.src_port = args->sport;
167 key.dst_port = args->dport;
168 __builtin_memcpy(&key.src_ip, args->saddr, sizeof(key.src_ip));
169 __builtin_memcpy(&key.dst_ip, args->daddr, sizeof(key.dst_ip));
170
171 full_packet *packet_value;
172 packet_value = monitored_connections.lookup(&key);
173 if (packet_value != 0)
174 {
175 monitored_connections.delete(&key);
176 if (VERBOSE_OUTPUT)
177 {
178 verbose_event v = {};
179 make_verbose_event(&v, packet_value->src_ip, packet_value->dst_ip, packet_value->src_port, packet_value->dst_port, packet_value->pid, 3);
180 verbose_events.perf_submit(args, &v, sizeof(v));
181 }
182
183 }
184 }
185
186 return 0;
187 }
188
189
190
191
trace_connect_entry(struct pt_regs * ctx,struct sock * sk)192 int trace_connect_entry(struct pt_regs *ctx, struct sock *sk)
193 {
194 key_hash key = {};
195 full_packet packet_value = {};
196 u8 verbose_state = 0;
197
198 u16 family = sk->__sk_common.skc_family;
199 if (family != AF_INET)
200 {
201 return 0;
202 }
203
204 u32 pid = bpf_get_current_pid_tgid() >> 32;
205 u16 dst_port = sk->__sk_common.skc_dport;
206 dst_port = ntohs(dst_port);
207 u16 src_port = sk->__sk_common.skc_num;
208 u32 src_ip = sk->__sk_common.skc_rcv_saddr;
209 u32 dst_ip = sk->__sk_common.skc_daddr;
210
211 u32 *monitored_pid = pid_list.lookup(&pid);
212 if (!monitored_pid)
213 {
214 return 0;
215 }
216
217 u32 *allowed_ip = allow_list.lookup(&dst_ip);
218 if (!allowed_ip)
219 {
220 key.src_ip = src_ip;
221 key.src_port = src_port;
222 key.dst_ip = dst_ip;
223 key.dst_port = dst_port;
224
225 packet_value.src_ip = src_ip;
226 packet_value.src_port = src_port;
227 packet_value.dst_ip = dst_ip;
228 packet_value.dst_port = dst_port;
229 packet_value.pid = pid;
230 bpf_get_current_comm(&packet_value.comm, sizeof(packet_value.comm));
231 verbose_state = 1;
232 monitored_connections.update(&key, &packet_value);
233 }
234 else
235 {
236 verbose_state = 2;
237 }
238
239 if (VERBOSE_OUTPUT)
240 {
241 verbose_event v = {};
242 make_verbose_event(&v, src_ip, dst_ip, src_port, dst_port, pid, verbose_state);
243 verbose_events.perf_submit(ctx, &v, sizeof(v));
244 }
245
246 return 0;
247 }