/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021 Hengqi Chen */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "gethostlatency.h"

#define MAX_ENTRIES 10240

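/* filter by this PID; 0 means trace all processes (set from user space before load) */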
const volatile pid_t target_pid = 0;

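/* in-flight host lookups: one start event per thread, keyed by thread ID */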
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, MAX_ENTRIES);
        __type(key, __u32);
        __type(value, struct event);
} starts SEC(".maps");

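/* perf buffer for pushing completed events (with latency) to user space */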
struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} events SEC(".maps");

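/*
 * Entry probe: called when a host-resolution function is entered.
 * Records the start timestamp, PID, command name and hostname
 * argument, and stashes them in the starts map keyed by thread ID.
 */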
static int probe_entry(struct pt_regs *ctx)
{
        /* no hostname argument, nothing to trace */
        if (!PT_REGS_PARM1(ctx))
                return 0;

        __u64 pid_tgid = bpf_get_current_pid_tgid();
        __u32 pid = pid_tgid >> 32;
        __u32 tid = (__u32)pid_tgid;
        struct event event = {};

        if (target_pid && target_pid != pid)
                return 0;

        event.time = bpf_ktime_get_ns();
        event.pid = pid;
        bpf_get_current_comm(&event.comm, sizeof(event.comm));
        bpf_probe_read_user(&event.host, sizeof(event.host), (void *)PT_REGS_PARM1(ctx));
        bpf_map_update_elem(&starts, &tid, &event, BPF_ANY);
        return 0;
}

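/*
 * Return probe: called when the resolution function returns. Converts
 * the stored start timestamp into a latency delta and emits the event
 * to user space, then drops the map entry.
 */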
static int probe_return(struct pt_regs *ctx)
{
        __u32 tid = (__u32)bpf_get_current_pid_tgid();
        struct event *eventp;

        eventp = bpf_map_lookup_elem(&starts, &tid);
        if (!eventp)
                return 0;

        /* update time from timestamp to delta */
        eventp->time = bpf_ktime_get_ns() - eventp->time;
        bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, eventp, sizeof(*eventp));
        bpf_map_delete_elem(&starts, &tid);
        return 0;
}

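/*
 * Program entry points. Despite the kprobe/kretprobe section names, the
 * user-space loader typically attaches these as uprobes/uretprobes on
 * libc resolver functions (e.g. getaddrinfo), which is why the hostname
 * is read with bpf_probe_read_user() above.
 */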
SEC("kprobe/handle_entry")
int BPF_KPROBE(handle_entry)
{
        return probe_entry(ctx);
}

SEC("kretprobe/handle_return")
int BPF_KRETPROBE(handle_return)
{
        return probe_return(ctx);
}

char LICENSE[] SEC("license") = "GPL";