1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2020 Wenbo Zhang
3 #include <vmlinux.h>
4 #include <bpf/bpf_helpers.h>
5 #include <bpf/bpf_core_read.h>
6 #include <bpf/bpf_tracing.h>
7 #include "tcpconnlat.h"
8
9 #define AF_INET 2
10 #define AF_INET6 10
11
/* Run-time filters; `const volatile` places these in .rodata so the
 * loader can set them before the program is verified/loaded.
 * NOTE(review): values are presumably written by the userspace frontend
 * via skeleton rodata — confirm against the loader. */
const volatile __u64 targ_min_us = 0;	/* drop events faster than this (us); 0 = report all */
const volatile pid_t targ_tgid = 0;	/* trace only this process; 0 = all processes */
14
/* Per-connect context captured at connect() entry and consumed when the
 * handshake completes (keyed by socket pointer in the `start` map). */
struct piddata {
	char comm[TASK_COMM_LEN];	/* task command name at connect time */
	u64 ts;				/* bpf_ktime_get_ns() at connect entry */
	u32 tgid;			/* process (thread-group) id */
};
20
/* In-flight connects: socket pointer -> piddata recorded at connect().
 * Entries are removed on handshake completion or socket destruction. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 4096);
	__type(key, struct sock *);
	__type(value, struct piddata);
} start SEC(".maps");
27
/* Perf buffer used to push completed-connection events to userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
33
trace_connect(struct sock * sk)34 static int trace_connect(struct sock *sk)
35 {
36 u32 tgid = bpf_get_current_pid_tgid() >> 32;
37 struct piddata piddata = {};
38
39 if (targ_tgid && targ_tgid != tgid)
40 return 0;
41
42 bpf_get_current_comm(&piddata.comm, sizeof(piddata.comm));
43 piddata.ts = bpf_ktime_get_ns();
44 piddata.tgid = tgid;
45 bpf_map_update_elem(&start, &sk, &piddata, 0);
46 return 0;
47 }
48
/*
 * Fires on tcp_rcv_state_process(); when the socket is leaving SYN_SENT
 * (i.e. the SYN-ACK arrived), compute connect latency from the entry
 * recorded by trace_connect() and emit an event to userspace.
 */
static int handle_tcp_rcv_state_process(void *ctx, struct sock *sk)
{
	struct piddata *piddatap;
	struct event event = {};
	s64 delta;
	u64 ts;

	/* only the first state transition of an active connect matters */
	if (BPF_CORE_READ(sk, __sk_common.skc_state) != TCP_SYN_SENT)
		return 0;

	/* no entry: connect() was filtered out or never seen */
	piddatap = bpf_map_lookup_elem(&start, &sk);
	if (!piddatap)
		return 0;

	ts = bpf_ktime_get_ns();
	delta = (s64)(ts - piddatap->ts);
	/* negative delta can only come from a clock anomaly; discard */
	if (delta < 0)
		goto cleanup;

	event.delta_us = delta / 1000U;
	/* apply the optional minimum-latency filter */
	if (targ_min_us && event.delta_us < targ_min_us)
		goto cleanup;
	__builtin_memcpy(&event.comm, piddatap->comm,
			sizeof(event.comm));
	event.ts_us = ts / 1000;
	event.tgid = piddatap->tgid;
	event.lport = BPF_CORE_READ(sk, __sk_common.skc_num);
	/* skc_dport is kept in network byte order; userspace converts */
	event.dport = BPF_CORE_READ(sk, __sk_common.skc_dport);
	event.af = BPF_CORE_READ(sk, __sk_common.skc_family);
	if (event.af == AF_INET) {
		event.saddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
		event.daddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_daddr);
	} else {
		/* non-IPv4 is treated as IPv6 here */
		BPF_CORE_READ_INTO(&event.saddr_v6, sk,
				__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
		BPF_CORE_READ_INTO(&event.daddr_v6, sk,
				__sk_common.skc_v6_daddr.in6_u.u6_addr32);
	}
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			&event, sizeof(event));

cleanup:
	/* always drop the start entry once we have decided on this socket */
	bpf_map_delete_elem(&start, &sk);
	return 0;
}
94
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
	/* record the start of an IPv4 connect() attempt */
	return trace_connect(sk);
}
100
SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
	/* record the start of an IPv6 connect() attempt */
	return trace_connect(sk);
}
106
SEC("kprobe/tcp_rcv_state_process")
int BPF_KPROBE(tcp_rcv_state_process, struct sock *sk)
{
	/* handshake progress: compute and emit connect latency */
	return handle_tcp_rcv_state_process(ctx, sk);
}
112
SEC("tracepoint/tcp/tcp_destroy_sock")
int tcp_destroy_sock(struct trace_event_raw_tcp_event_sk *ctx)
{
	const struct sock *sk = ctx->skaddr;

	/* socket torn down (e.g. failed/aborted connect): drop any stale
	 * start entry so the map does not slowly fill with dead sockets */
	bpf_map_delete_elem(&start, &sk);
	return 0;
}
121
SEC("fentry/tcp_v4_connect")
int BPF_PROG(fentry_tcp_v4_connect, struct sock *sk)
{
	/* fentry variant of the v4 connect hook.
	 * NOTE(review): presumably the loader attaches either the kprobe
	 * or the fentry flavor, not both — confirm in userspace code. */
	return trace_connect(sk);
}
127
SEC("fentry/tcp_v6_connect")
int BPF_PROG(fentry_tcp_v6_connect, struct sock *sk)
{
	/* fentry variant of the v6 connect hook */
	return trace_connect(sk);
}
133
SEC("fentry/tcp_rcv_state_process")
int BPF_PROG(fentry_tcp_rcv_state_process, struct sock *sk)
{
	/* fentry variant of the handshake-progress hook */
	return handle_tcp_rcv_state_process(ctx, sk);
}
139
140 char LICENSE[] SEC("license") = "GPL";
141