// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
//
// Based on memleak(8) from BCC by Sasha Goldshtein and others.
// 1-Mar-2023   JP Kobryn   Created this.
#include <argp.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <bpf/libbpf.h>
#include <bpf/bpf.h>

#include "memleak.h"
#include "memleak.skel.h"
#include "trace_helpers.h"

#ifdef USE_BLAZESYM
#include "blazesym.h"
#endif

static struct env {
	int interval;
	int nr_intervals;
	pid_t pid;
	bool trace_all;
	bool show_allocs;
	bool combined_only;
	long long min_age_ns; // ns values (ms * 1e6) overflow a 32-bit int
	uint64_t sample_rate;
	int top_stacks;
	size_t min_size;
	size_t max_size;
	char object[32];

	bool wa_missing_free;
	bool percpu;
	int perf_max_stack_depth;
	int stack_map_max_entries;
	long page_size;
	bool kernel_trace;
	bool verbose;
	char command[32];
} env = {
	.interval = 5, // posarg 1
	.nr_intervals = -1, // posarg 2
	.pid = -1, // -p --pid
	.trace_all = false, // -t --trace
	.show_allocs = false, // -a --show-allocs
	.combined_only = false, // --combined-only
	.min_age_ns = 500, // -o --older (arg * 1e6)
	.wa_missing_free = false, // --wa-missing-free
	.sample_rate = 1, // -s --sample-rate
	.top_stacks = 10, // -T --top
	.min_size = 0, // -z --min-size
	.max_size = -1, // -Z --max-size
	.object = {0}, // -O --obj
	.percpu = false, // --percpu
	.perf_max_stack_depth = 127,
	.stack_map_max_entries = 10240,
	.page_size = 1,
	.kernel_trace = true,
	.verbose = false,
	.command = {0}, // -c --command
};

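/*
 * Userspace bookkeeping for outstanding allocations: each unique stack id
 * gets one struct allocation aggregating total size and count; with
 * -a/--show-allocs it also carries a singly linked list of the individual
 * (address, size) pairs behind that total.
 */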
struct allocation_node {
	uint64_t address;
	size_t size;
	struct allocation_node* next;
};

struct allocation {
	uint64_t stack_id;
	size_t size;
	size_t count;
	struct allocation_node* allocations;
};

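/*
 * Helper macros for attaching uprobes/uretprobes to allocator symbols in
 * env.object. __ATTACH_UPROBE stores the resulting link in the skeleton so
 * memleak_bpf__destroy() detaches it on cleanup; the *_CHECKED variants
 * treat a failed attachment as fatal, while the unchecked ones are used for
 * symbols that may legitimately be missing from the target libc.
 */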
#define __ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe) \
	do { \
		LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts, \
				.func_name = #sym_name, \
				.retprobe = is_retprobe); \
		skel->links.prog_name = bpf_program__attach_uprobe_opts( \
				skel->progs.prog_name, \
				env.pid, \
				env.object, \
				0, \
				&uprobe_opts); \
	} while (false)

#define __CHECK_PROGRAM(skel, prog_name) \
	do { \
		if (!skel->links.prog_name) { \
			perror("no program attached for " #prog_name); \
			return -errno; \
		} \
	} while (false)

#define __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, is_retprobe) \
	do { \
		__ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe); \
		__CHECK_PROGRAM(skel, prog_name); \
	} while (false)

#define ATTACH_UPROBE(skel, sym_name, prog_name) __ATTACH_UPROBE(skel, sym_name, prog_name, false)
#define ATTACH_URETPROBE(skel, sym_name, prog_name) __ATTACH_UPROBE(skel, sym_name, prog_name, true)

#define ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name) __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, false)
#define ATTACH_URETPROBE_CHECKED(skel, sym_name, prog_name) __ATTACH_UPROBE_CHECKED(skel, sym_name, prog_name, true)

static void sig_handler(int signo);

static long argp_parse_long(int key, const char *arg, struct argp_state *state);
static error_t argp_parse_arg(int key, char *arg, struct argp_state *state);

static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args);

static int event_init(int *fd);
static int event_wait(int fd, uint64_t expected_event);
static int event_notify(int fd, uint64_t event);

static pid_t fork_sync_exec(const char *command, int fd);

#ifdef USE_BLAZESYM
static void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym);
static void print_stack_frames_by_blazesym();
#else
static void print_stack_frames_by_ksyms();
static void print_stack_frames_by_syms_cache();
#endif
static int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd);

static int alloc_size_compare(const void *a, const void *b);

static int print_outstanding_allocs(int allocs_fd, int stack_traces_fd);
static int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_fd);

static bool has_kernel_node_tracepoints();
static void disable_kernel_node_tracepoints(struct memleak_bpf *skel);
static void disable_kernel_percpu_tracepoints(struct memleak_bpf *skel);
static void disable_kernel_tracepoints(struct memleak_bpf *skel);

static int attach_uprobes(struct memleak_bpf *skel);

const char *argp_program_version = "memleak 0.1";
const char *argp_program_bug_address =
	"https://github.com/iovisor/bcc/tree/master/libbpf-tools";

const char argp_args_doc[] =
"Trace outstanding memory allocations\n"
"\n"
"USAGE: memleak [-h] [-c COMMAND] [-p PID] [-t] [-a] [-o AGE_MS] [-C] [-F] [-s SAMPLE_RATE] [-T TOP_STACKS] [-z MIN_SIZE] [-Z MAX_SIZE] [-O OBJECT] [-P] [INTERVAL] [INTERVALS]\n"
"\n"
"EXAMPLES:\n"
"./memleak -p $(pidof allocs)\n"
"        Trace allocations and display a summary of 'leaked' (outstanding)\n"
"        allocations every 5 seconds\n"
"./memleak -p $(pidof allocs) -t\n"
"        Trace allocations and display each individual allocator function call\n"
"./memleak -ap $(pidof allocs) 10\n"
"        Trace allocations and display allocated addresses, sizes, and stacks\n"
"        every 10 seconds for outstanding allocations\n"
"./memleak -c './allocs'\n"
"        Run the specified command and trace its allocations\n"
"./memleak\n"
"        Trace allocations in kernel mode and display a summary of outstanding\n"
"        allocations every 5 seconds\n"
"./memleak -o 60000\n"
"        Trace allocations in kernel mode and display a summary of outstanding\n"
"        allocations that are at least one minute (60 seconds) old\n"
"./memleak -s 5\n"
"        Trace roughly every 5th allocation, to reduce overhead\n"
"";

static const struct argp_option argp_options[] = {
	// name/longopt:str, key/shortopt:int, arg:str, flags:int, doc:str
	{"pid", 'p', "PID", 0, "process ID to trace. if not specified, trace kernel allocs"},
	{"trace", 't', 0, 0, "print trace messages for each alloc/free call"},
	{"show-allocs", 'a', 0, 0, "show allocation addresses and sizes as well as call stacks"},
	{"older", 'o', "AGE_MS", 0, "prune allocations younger than this age in milliseconds"},
	{"command", 'c', "COMMAND", 0, "execute and trace the specified command"},
	{"combined-only", 'C', 0, 0, "show combined allocation statistics only"},
	{"wa-missing-free", 'F', 0, 0, "workaround to alleviate misjudgments when free is missing"},
	{"sample-rate", 's', "SAMPLE_RATE", 0, "sample every N-th allocation to decrease the overhead"},
	{"top", 'T', "TOP_STACKS", 0, "display only this many top allocating stacks (by size)"},
	{"min-size", 'z', "MIN_SIZE", 0, "capture only allocations larger than this size"},
	{"max-size", 'Z', "MAX_SIZE", 0, "capture only allocations smaller than this size"},
	{"obj", 'O', "OBJECT", 0, "attach to allocator functions in the specified object"},
	{"percpu", 'P', NULL, 0, "trace percpu allocations"},
	{},
};

static volatile sig_atomic_t exiting;
static volatile sig_atomic_t child_exited;

static struct sigaction sig_action = {
	.sa_handler = sig_handler
};

static int child_exec_event_fd = -1;

#ifdef USE_BLAZESYM
static blazesym *symbolizer;
static sym_src_cfg src_cfg;
#else
struct syms_cache *syms_cache;
struct ksyms *ksyms;
#endif
static void (*print_stack_frames_func)();

static uint64_t *stack;

static struct allocation *allocs;

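// fallback allocator object to probe when -O/--obj is not given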
static const char default_object[] = "libc.so.6";

int main(int argc, char *argv[])
{
	int ret = 0;
	struct memleak_bpf *skel = NULL;

	static const struct argp argp = {
		.options = argp_options,
		.parser = argp_parse_arg,
		.doc = argp_args_doc,
	};

	// parse command line args to env settings
	if (argp_parse(&argp, argc, argv, 0, NULL, NULL)) {
		fprintf(stderr, "failed to parse args\n");

		goto cleanup;
	}

	// install signal handler
	if (sigaction(SIGINT, &sig_action, NULL) || sigaction(SIGCHLD, &sig_action, NULL)) {
		perror("failed to set up signal handling");
		ret = -errno;

		goto cleanup;
	}

	// post-processing and validation of env settings
	if (env.min_size > env.max_size) {
		fprintf(stderr, "min size (-z) can't be greater than max size (-Z)\n");
		return 1;
	}

	if (!strlen(env.object)) {
		printf("using default object: %s\n", default_object);
		strncpy(env.object, default_object, sizeof(env.object) - 1);
	}

	env.page_size = sysconf(_SC_PAGE_SIZE);
	printf("using page size: %ld\n", env.page_size);

	env.kernel_trace = env.pid < 0 && !strlen(env.command);
	printf("tracing kernel: %s\n", env.kernel_trace ? "true" : "false");

	// if a specific userspace program was specified,
	// create the child process and use an eventfd to synchronize the call to exec()
	if (strlen(env.command)) {
		if (env.pid >= 0) {
			fprintf(stderr, "cannot specify both command and pid\n");
			ret = 1;

			goto cleanup;
		}

		if (event_init(&child_exec_event_fd)) {
			fprintf(stderr, "failed to init child event\n");

			goto cleanup;
		}

		const pid_t child_pid = fork_sync_exec(env.command, child_exec_event_fd);
		if (child_pid < 0) {
			perror("failed to spawn child process");
			ret = -errno;

			goto cleanup;
		}

		env.pid = child_pid;
	}

	// allocate space for storing a stack trace
	stack = calloc(env.perf_max_stack_depth, sizeof(*stack));
	if (!stack) {
		fprintf(stderr, "failed to allocate stack array\n");
		ret = -ENOMEM;

		goto cleanup;
	}

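	// when built with blazesym, symbolize against either kernel sources or
	// the target process, depending on what is being traced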
#ifdef USE_BLAZESYM
	if (env.pid < 0) {
		src_cfg.src_type = SRC_T_KERNEL;
		src_cfg.params.kernel.kallsyms = NULL;
		src_cfg.params.kernel.kernel_image = NULL;
	} else {
		src_cfg.src_type = SRC_T_PROCESS;
		src_cfg.params.process.pid = env.pid;
	}
#endif

	// allocate space for storing "allocation" structs
	if (env.combined_only)
		allocs = calloc(COMBINED_ALLOCS_MAX_ENTRIES, sizeof(*allocs));
	else
		allocs = calloc(ALLOCS_MAX_ENTRIES, sizeof(*allocs));

	if (!allocs) {
		fprintf(stderr, "failed to allocate array\n");
		ret = -ENOMEM;

		goto cleanup;
	}

	libbpf_set_print(libbpf_print_fn);

	skel = memleak_bpf__open();
	if (!skel) {
		fprintf(stderr, "failed to open bpf object\n");
		ret = 1;

		goto cleanup;
	}

	skel->rodata->min_size = env.min_size;
	skel->rodata->max_size = env.max_size;
	skel->rodata->page_size = env.page_size;
	skel->rodata->sample_rate = env.sample_rate;
	skel->rodata->trace_all = env.trace_all;
	skel->rodata->stack_flags = env.kernel_trace ? 0 : BPF_F_USER_STACK;
	skel->rodata->wa_missing_free = env.wa_missing_free;

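	// the stack traces map stores up to perf_max_stack_depth addresses per
	// stack id, so its value size must be set before the skeleton is loaded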
	bpf_map__set_value_size(skel->maps.stack_traces,
			env.perf_max_stack_depth * sizeof(unsigned long));
	bpf_map__set_max_entries(skel->maps.stack_traces, env.stack_map_max_entries);

	// disable kernel tracepoints based on settings or availability
	if (env.kernel_trace) {
		if (!has_kernel_node_tracepoints())
			disable_kernel_node_tracepoints(skel);

		if (!env.percpu)
			disable_kernel_percpu_tracepoints(skel);
	} else {
		disable_kernel_tracepoints(skel);
	}

	ret = memleak_bpf__load(skel);
	if (ret) {
		fprintf(stderr, "failed to load bpf object\n");

		goto cleanup;
	}

	const int allocs_fd = bpf_map__fd(skel->maps.allocs);
	const int combined_allocs_fd = bpf_map__fd(skel->maps.combined_allocs);
	const int stack_traces_fd = bpf_map__fd(skel->maps.stack_traces);

	// if userspace oriented, attach uprobes
	if (!env.kernel_trace) {
		ret = attach_uprobes(skel);
		if (ret) {
			fprintf(stderr, "failed to attach uprobes\n");

			goto cleanup;
		}
	}

	ret = memleak_bpf__attach(skel);
	if (ret) {
		fprintf(stderr, "failed to attach bpf program(s)\n");

		goto cleanup;
	}

	// if running a specific userspace program,
	// notify the child process that it can exec its program
	if (strlen(env.command)) {
		ret = event_notify(child_exec_event_fd, 1);
		if (ret) {
			fprintf(stderr, "failed to notify child to perform exec\n");

			goto cleanup;
		}
	}

#ifdef USE_BLAZESYM
	symbolizer = blazesym_new();
	if (!symbolizer) {
		fprintf(stderr, "Failed to load blazesym\n");
		ret = -ENOMEM;

		goto cleanup;
	}
	print_stack_frames_func = print_stack_frames_by_blazesym;
#else
	if (env.kernel_trace) {
		ksyms = ksyms__load();
		if (!ksyms) {
			fprintf(stderr, "Failed to load ksyms\n");
			ret = -ENOMEM;

			goto cleanup;
		}
		print_stack_frames_func = print_stack_frames_by_ksyms;
	} else {
		syms_cache = syms_cache__new(0);
		if (!syms_cache) {
			fprintf(stderr, "Failed to create syms_cache\n");
			ret = -ENOMEM;

			goto cleanup;
		}
		print_stack_frames_func = print_stack_frames_by_syms_cache;
	}
#endif

	printf("Tracing outstanding memory allocs... Hit Ctrl-C to end\n");

	// main loop
	while (!exiting && env.nr_intervals) {
		env.nr_intervals--;

		sleep(env.interval);

		if (env.combined_only)
			print_outstanding_combined_allocs(combined_allocs_fd, stack_traces_fd);
		else
			print_outstanding_allocs(allocs_fd, stack_traces_fd);
	}

	// after the loop ends, check for a child process and clean up accordingly
	if (env.pid > 0 && strlen(env.command)) {
		if (!child_exited) {
			if (kill(env.pid, SIGTERM)) {
				perror("failed to signal child process");
				ret = -errno;

				goto cleanup;
			}
			printf("signaled child process\n");
		}

		if (waitpid(env.pid, NULL, 0) < 0) {
			perror("failed to reap child process");
			ret = -errno;

			goto cleanup;
		}
		printf("reaped child process\n");
	}

cleanup:
#ifdef USE_BLAZESYM
	blazesym_free(symbolizer);
#else
	if (syms_cache)
		syms_cache__free(syms_cache);
	if (ksyms)
		ksyms__free(ksyms);
#endif
	memleak_bpf__destroy(skel);

	free(allocs);
	free(stack);

	printf("done\n");

	return ret;
}

long argp_parse_long(int key, const char *arg, struct argp_state *state)
{
	errno = 0;
	const long temp = strtol(arg, NULL, 10);
	if (errno || temp <= 0) {
		fprintf(stderr, "error arg:%c %s\n", (char)key, arg);
		argp_usage(state);
	}

	return temp;
}

error_t argp_parse_arg(int key, char *arg, struct argp_state *state)
{
	static int pos_args = 0;

	switch (key) {
	case 'p':
		env.pid = atoi(arg);
		break;
	case 't':
		env.trace_all = true;
		break;
	case 'a':
		env.show_allocs = true;
		break;
	case 'o':
		env.min_age_ns = 1e6 * atoi(arg);
		break;
	case 'c':
		strncpy(env.command, arg, sizeof(env.command) - 1);
		break;
	case 'C':
		env.combined_only = true;
		break;
	case 'F':
		env.wa_missing_free = true;
		break;
	case 's':
		env.sample_rate = argp_parse_long(key, arg, state);
		break;
	case 'T':
		env.top_stacks = atoi(arg);
		break;
	case 'z':
		env.min_size = argp_parse_long(key, arg, state);
		break;
	case 'Z':
		env.max_size = argp_parse_long(key, arg, state);
		break;
	case 'O':
		strncpy(env.object, arg, sizeof(env.object) - 1);
		break;
	case 'P':
		env.percpu = true;
		break;
	case ARGP_KEY_ARG:
		pos_args++;

		if (pos_args == 1) {
			env.interval = argp_parse_long(key, arg, state);
		} else if (pos_args == 2) {
			env.nr_intervals = argp_parse_long(key, arg, state);
		} else {
			fprintf(stderr, "Unrecognized positional argument: %s\n", arg);
			argp_usage(state);
		}

		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args)
{
	if (level == LIBBPF_DEBUG && !env.verbose)
		return 0;

	return vfprintf(stderr, format, args);
}

void sig_handler(int signo)
{
	if (signo == SIGCHLD)
		child_exited = 1;

	exiting = 1;
}

int event_init(int *fd)
{
	if (!fd) {
		fprintf(stderr, "pointer to fd is null\n");

		return 1;
	}

	const int tmp_fd = eventfd(0, EFD_CLOEXEC);
	if (tmp_fd < 0) {
		perror("failed to create event fd");

		return -errno;
	}

	*fd = tmp_fd;

	return 0;
}

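/*
 * Block on the eventfd until its counter becomes nonzero; the 8-byte read
 * drains the counter, which must match what the notifier wrote.
 */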
int event_wait(int fd, uint64_t expected_event)
{
	uint64_t event = 0;
	const ssize_t bytes = read(fd, &event, sizeof(event));
	if (bytes < 0) {
		perror("failed to read from fd");

		return -errno;
	} else if (bytes != sizeof(event)) {
		fprintf(stderr, "read unexpected size\n");

		return 1;
	}

	if (event != expected_event) {
		fprintf(stderr, "read event %lu, expected %lu\n", event, expected_event);

		return 1;
	}

	return 0;
}

int event_notify(int fd, uint64_t event)
{
	const ssize_t bytes = write(fd, &event, sizeof(event));
	if (bytes < 0) {
		perror("failed to write to fd");

		return -errno;
	} else if (bytes != sizeof(event)) {
		fprintf(stderr, "attempted to write %zu bytes, wrote %zd bytes\n", sizeof(event), bytes);

		return 1;
	}

	return 0;
}

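/*
 * Fork the child that will run the traced command, but make it block on the
 * eventfd before calling exec(). The parent attaches its probes to the
 * child's pid first and only then sends the "go" event, so no early
 * allocations are missed.
 */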
pid_t fork_sync_exec(const char *command, int fd)
{
	const pid_t pid = fork();

	switch (pid) {
	case -1:
		perror("failed to create child process");
		break;
	case 0: {
		const uint64_t event = 1;
		if (event_wait(fd, event)) {
			fprintf(stderr, "failed to wait on event\n");
			exit(EXIT_FAILURE);
		}

		printf("received go event. executing child command\n");

		const int err = execl(command, command, NULL);
		if (err) {
			perror("failed to execute child command");
			// exit rather than return, so a failed exec does not leave
			// the child running as a second copy of the tracer
			exit(EXIT_FAILURE);
		}

		break;
	}
	default:
		printf("child created with pid: %d\n", pid);

		break;
	}

	return pid;
}


#ifdef USE_BLAZESYM
void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym)
{
	if (!sym)
		printf("\t%zu [<%016lx>] <%s>\n", frame, addr, "null sym");
	else if (sym->path && strlen(sym->path))
		printf("\t%zu [<%016lx>] %s+0x%lx %s:%ld\n", frame, addr, sym->symbol, addr - sym->start_address, sym->path, sym->line_no);
	else
		printf("\t%zu [<%016lx>] %s+0x%lx\n", frame, addr, sym->symbol, addr - sym->start_address);
}

void print_stack_frames_by_blazesym()
{
	const blazesym_result *result = blazesym_symbolize(symbolizer, &src_cfg, 1, stack, env.perf_max_stack_depth);

	// check for failed symbolization up front, before the loop below
	// dereferences the result
	if (!result) {
		fprintf(stderr, "failed to symbolize stack\n");
		return;
	}

	for (size_t j = 0; j < result->size; ++j) {
		const uint64_t addr = stack[j];

		if (addr == 0)
			break;

		// no symbol found
		if (result->entries[j].size == 0) {
			print_stack_frame_by_blazesym(j, addr, NULL);

			continue;
		}

		// single symbol found
		if (result->entries[j].size == 1) {
			const blazesym_csym *sym = &result->entries[j].syms[0];
			print_stack_frame_by_blazesym(j, addr, sym);

			continue;
		}

		// multiple symbols found
		printf("\t%zu [<%016lx>] (%lu entries)\n", j, addr, result->entries[j].size);

		for (size_t k = 0; k < result->entries[j].size; ++k) {
			const blazesym_csym *sym = &result->entries[j].syms[k];
			if (sym->path && strlen(sym->path))
				printf("\t\t%s@0x%lx %s:%ld\n", sym->symbol, sym->start_address, sym->path, sym->line_no);
			else
				printf("\t\t%s@0x%lx\n", sym->symbol, sym->start_address);
		}
	}

	blazesym_result_free(result);
}
#else
void print_stack_frames_by_ksyms()
{
	for (size_t i = 0; i < env.perf_max_stack_depth; ++i) {
		const uint64_t addr = stack[i];

		if (addr == 0)
			break;

		const struct ksym *ksym = ksyms__map_addr(ksyms, addr);
		if (ksym)
			printf("\t%zu [<%016lx>] %s+0x%lx\n", i, addr, ksym->name, addr - ksym->addr);
		else
			printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym");
	}
}

void print_stack_frames_by_syms_cache()
{
	const struct syms *syms = syms_cache__get_syms(syms_cache, env.pid);
	if (!syms) {
		fprintf(stderr, "Failed to get syms\n");
		return;
	}

	for (size_t i = 0; i < env.perf_max_stack_depth; ++i) {
		const uint64_t addr = stack[i];

		if (addr == 0)
			break;

		char *dso_name;
		uint64_t dso_offset;
		const struct sym *sym = syms__map_addr_dso(syms, addr, &dso_name, &dso_offset);
		if (sym) {
			printf("\t%zu [<%016lx>] %s+0x%lx", i, addr, sym->name, sym->offset);
			if (dso_name)
				printf(" [%s]", dso_name);
			printf("\n");
		} else {
			printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym");
		}
	}
}
#endif

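/*
 * For each aggregated allocation, print a summary line (plus the individual
 * addresses with -a), then fetch the raw stack by id from the BPF stack
 * traces map and hand it to the symbolizer selected at startup.
 */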
int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd)
{
	for (size_t i = 0; i < nr_allocs; ++i) {
		const struct allocation *alloc = &allocs[i];

		printf("%zu bytes in %zu allocations from stack\n", alloc->size, alloc->count);

		if (env.show_allocs) {
			struct allocation_node* it = alloc->allocations;
			while (it != NULL) {
				printf("\taddr = %#lx size = %zu\n", it->address, it->size);
				it = it->next;
			}
		}

		if (bpf_map_lookup_elem(stack_traces_fd, &alloc->stack_id, stack)) {
			if (errno == ENOENT)
				continue;

			perror("failed to lookup stack trace");

			return -errno;
		}

		(*print_stack_frames_func)();
	}

	return 0;
}

int alloc_size_compare(const void *a, const void *b)
{
	const struct allocation *x = (struct allocation *)a;
	const struct allocation *y = (struct allocation *)b;

	// descending order

	if (x->size > y->size)
		return -1;

	if (x->size < y->size)
		return 1;

	return 0;
}

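/*
 * Walk the "allocs" BPF map (address -> alloc_info), skip entries that are
 * too young or lack a valid stack id, aggregate the rest per stack id into
 * the allocs array, then sort by total size and print the top stacks.
 */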
int print_outstanding_allocs(int allocs_fd, int stack_traces_fd)
{
	time_t t = time(NULL);
	struct tm *tm = localtime(&t);

	size_t nr_allocs = 0;

	// for each struct alloc_info "alloc_info" in the bpf map "allocs"
	for (uint64_t prev_key = 0, curr_key = 0;; prev_key = curr_key) {
		struct alloc_info alloc_info = {};

		if (bpf_map_get_next_key(allocs_fd, &prev_key, &curr_key)) {
			if (errno == ENOENT)
				break; // no more keys, done

			perror("map get next key error");

			return -errno;
		}

		if (bpf_map_lookup_elem(allocs_fd, &curr_key, &alloc_info)) {
			if (errno == ENOENT)
				continue;

			perror("map lookup error");

			return -errno;
		}

		// filter by age
		if (get_ktime_ns() - env.min_age_ns < alloc_info.timestamp_ns)
			continue;

		// filter invalid stacks
		if (alloc_info.stack_id < 0)
			continue;

		// when the stack_id exists in the allocs array,
		// increment size with alloc_info.size
		bool stack_exists = false;

		for (size_t i = 0; !stack_exists && i < nr_allocs; ++i) {
			struct allocation *alloc = &allocs[i];

			if (alloc->stack_id == alloc_info.stack_id) {
				alloc->size += alloc_info.size;
				alloc->count++;

				if (env.show_allocs) {
					struct allocation_node* node = malloc(sizeof(struct allocation_node));
					if (!node) {
						perror("malloc failed");
						return -errno;
					}
					node->address = curr_key;
					node->size = alloc_info.size;
					node->next = alloc->allocations;
					alloc->allocations = node;
				}

				stack_exists = true;
				break;
			}
		}

		if (stack_exists)
			continue;

		// when the stack_id does not exist in the allocs array,
		// create a new entry in the array
		struct allocation alloc = {
			.stack_id = alloc_info.stack_id,
			.size = alloc_info.size,
			.count = 1,
			.allocations = NULL
		};

		if (env.show_allocs) {
			struct allocation_node* node = malloc(sizeof(struct allocation_node));
			if (!node) {
				perror("malloc failed");
				return -errno;
			}
			node->address = curr_key;
			node->size = alloc_info.size;
			node->next = NULL;
			alloc.allocations = node;
		}

		memcpy(&allocs[nr_allocs], &alloc, sizeof(alloc));
		nr_allocs++;
	}

	// sort the allocs array in descending order
	qsort(allocs, nr_allocs, sizeof(allocs[0]), alloc_size_compare);

	// get min of allocs we stored vs the top N requested stacks
	size_t nr_allocs_to_show = nr_allocs < env.top_stacks ? nr_allocs : env.top_stacks;

	printf("[%02d:%02d:%02d] Top %zu stacks with outstanding allocations:\n",
			tm->tm_hour, tm->tm_min, tm->tm_sec, nr_allocs_to_show);

	print_stack_frames(allocs, nr_allocs_to_show, stack_traces_fd);

	// reset the allocs list so that we don't accidentally reuse data
	// the next time this function is called
	for (size_t i = 0; i < nr_allocs; i++) {
		allocs[i].stack_id = 0;
		if (env.show_allocs) {
			struct allocation_node *it = allocs[i].allocations;
			while (it != NULL) {
				struct allocation_node *this = it;
				it = it->next;
				free(this);
			}
			allocs[i].allocations = NULL;
		}
	}

	return 0;
}

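/*
 * In combined mode the BPF side already aggregates per stack id (total size
 * and number of allocations), so userspace only copies the map entries,
 * sorts them, and prints the top stacks.
 */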
int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_fd)
{
	time_t t = time(NULL);
	struct tm *tm = localtime(&t);

	size_t nr_allocs = 0;

	// for each stack_id "curr_key" and union combined_alloc_info "alloc"
	// in the bpf map "combined_allocs"
	for (uint64_t prev_key = 0, curr_key = 0;; prev_key = curr_key) {
		union combined_alloc_info combined_alloc_info;
		memset(&combined_alloc_info, 0, sizeof(combined_alloc_info));

		if (bpf_map_get_next_key(combined_allocs_fd, &prev_key, &curr_key)) {
			if (errno == ENOENT)
				break; // no more keys, done

			perror("map get next key error");

			return -errno;
		}

		if (bpf_map_lookup_elem(combined_allocs_fd, &curr_key, &combined_alloc_info)) {
			if (errno == ENOENT)
				continue;

			perror("map lookup error");

			return -errno;
		}

		const struct allocation alloc = {
			.stack_id = curr_key,
			.size = combined_alloc_info.total_size,
			.count = combined_alloc_info.number_of_allocs,
			.allocations = NULL
		};

		memcpy(&allocs[nr_allocs], &alloc, sizeof(alloc));
		nr_allocs++;
	}

	qsort(allocs, nr_allocs, sizeof(allocs[0]), alloc_size_compare);

	// get min of allocs we stored vs the top N requested stacks
	nr_allocs = nr_allocs < env.top_stacks ? nr_allocs : env.top_stacks;

	printf("[%02d:%02d:%02d] Top %zu stacks with outstanding allocations:\n",
			tm->tm_hour, tm->tm_min, tm->tm_sec, nr_allocs);

	print_stack_frames(allocs, nr_allocs, stack_traces_fd);

	return 0;
}

bool has_kernel_node_tracepoints()
{
	return tracepoint_exists("kmem", "kmalloc_node") &&
		tracepoint_exists("kmem", "kmem_cache_alloc_node");
}

void disable_kernel_node_tracepoints(struct memleak_bpf *skel)
{
	bpf_program__set_autoload(skel->progs.memleak__kmalloc_node, false);
	bpf_program__set_autoload(skel->progs.memleak__kmem_cache_alloc_node, false);
}

void disable_kernel_percpu_tracepoints(struct memleak_bpf *skel)
{
	bpf_program__set_autoload(skel->progs.memleak__percpu_alloc_percpu, false);
	bpf_program__set_autoload(skel->progs.memleak__percpu_free_percpu, false);
}

void disable_kernel_tracepoints(struct memleak_bpf *skel)
{
	bpf_program__set_autoload(skel->progs.memleak__kmalloc, false);
	bpf_program__set_autoload(skel->progs.memleak__kmalloc_node, false);
	bpf_program__set_autoload(skel->progs.memleak__kfree, false);
	bpf_program__set_autoload(skel->progs.memleak__kmem_cache_alloc, false);
	bpf_program__set_autoload(skel->progs.memleak__kmem_cache_alloc_node, false);
	bpf_program__set_autoload(skel->progs.memleak__kmem_cache_free, false);
	bpf_program__set_autoload(skel->progs.memleak__mm_page_alloc, false);
	bpf_program__set_autoload(skel->progs.memleak__mm_page_free, false);
	bpf_program__set_autoload(skel->progs.memleak__percpu_alloc_percpu, false);
	bpf_program__set_autoload(skel->progs.memleak__percpu_free_percpu, false);
}

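/*
 * Attach entry/exit probes to the allocator API of env.object. The core
 * entry points must attach successfully; the trailing group is allowed to
 * fail since those symbols are not exported by every libc.
 */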
int attach_uprobes(struct memleak_bpf *skel)
{
	ATTACH_UPROBE_CHECKED(skel, malloc, malloc_enter);
	ATTACH_URETPROBE_CHECKED(skel, malloc, malloc_exit);

	ATTACH_UPROBE_CHECKED(skel, calloc, calloc_enter);
	ATTACH_URETPROBE_CHECKED(skel, calloc, calloc_exit);

	ATTACH_UPROBE_CHECKED(skel, realloc, realloc_enter);
	ATTACH_URETPROBE_CHECKED(skel, realloc, realloc_exit);

	ATTACH_UPROBE_CHECKED(skel, mmap, mmap_enter);
	ATTACH_URETPROBE_CHECKED(skel, mmap, mmap_exit);

	ATTACH_UPROBE_CHECKED(skel, posix_memalign, posix_memalign_enter);
	ATTACH_URETPROBE_CHECKED(skel, posix_memalign, posix_memalign_exit);

	ATTACH_UPROBE_CHECKED(skel, memalign, memalign_enter);
	ATTACH_URETPROBE_CHECKED(skel, memalign, memalign_exit);

	ATTACH_UPROBE_CHECKED(skel, free, free_enter);
	ATTACH_UPROBE_CHECKED(skel, munmap, munmap_enter);

	// the following probes are intentionally allowed to fail attachment

	// deprecated; may be absent from some libc implementations (e.g. bionic)
	ATTACH_UPROBE(skel, valloc, valloc_enter);
	ATTACH_URETPROBE(skel, valloc, valloc_exit);

	// deprecated; may be absent from some libc implementations (e.g. bionic)
	ATTACH_UPROBE(skel, pvalloc, pvalloc_enter);
	ATTACH_URETPROBE(skel, pvalloc, pvalloc_exit);

	// added in C11
	ATTACH_UPROBE(skel, aligned_alloc, aligned_alloc_enter);
	ATTACH_URETPROBE(skel, aligned_alloc, aligned_alloc_exit);

	return 0;
}
