// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

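/*
 * Global state: the opened BPF skeleton, whether the kernel supports the
 * kmem_cache BPF iterator, and a hashmap from slab cache ID to its
 * struct slab_cache_data (filled by run_slab_cache_iter() below).
 */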
static struct lock_contention_bpf *skel;
static bool has_slab_iter;
static struct hashmap slab_hash;

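/*
 * Slab cache IDs are already unique integers, so an identity hash and a
 * plain equality check are sufficient for slab_hash.
 */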
static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
{
	return key;
}

static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}

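/*
 * Check whether the running kernel supports the kmem_cache BPF iterator by
 * looking up "bpf_iter__kmem_cache" in the vmlinux BTF.  If it does not,
 * disable autoload of the iterator program so the skeleton can still load.
 */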
static void check_slab_cache_iter(struct lock_contention *con)
{
	struct btf *btf = btf__load_vmlinux_btf();
	s32 ret;

	hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);

	if (btf == NULL) {
		pr_debug("BTF loading failed: %s\n", strerror(errno));
		return;
	}

	ret = btf__find_by_name_kind(btf, "bpf_iter__kmem_cache", BTF_KIND_STRUCT);
	if (ret < 0) {
		bpf_program__set_autoload(skel->progs.slab_cache_iter, false);
		pr_debug("slab cache iterator is not available: %d\n", ret);
		goto out;
	}

	has_slab_iter = true;

	bpf_map__set_max_entries(skel->maps.slab_caches, con->map_nr_entries);
out:
	btf__free(btf);
}

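/*
 * Trigger the slab cache iterator program by reading from its link, then
 * collect the (ID -> slab_cache_data) entries it produced into slab_hash
 * for name lookups later.
 */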
static void run_slab_cache_iter(void)
{
	int fd;
	char buf[256];
	long key, *prev_key;

	if (!has_slab_iter)
		return;

	fd = bpf_iter_create(bpf_link__fd(skel->links.slab_cache_iter));
	if (fd < 0) {
		pr_debug("cannot create slab cache iter: %d\n", fd);
		return;
	}

	/* This will run the BPF program */
	while (read(fd, buf, sizeof(buf)) > 0)
		continue;

	close(fd);

	/* Read the slab cache map and build a hash with IDs */
	fd = bpf_map__fd(skel->maps.slab_caches);
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		struct slab_cache_data *data;

		data = malloc(sizeof(*data));
		if (data == NULL)
			break;

		if (bpf_map_lookup_elem(fd, &key, data) < 0) {
			/* don't leak the entry on a lookup failure */
			free(data);
			break;
		}

		hashmap__add(&slab_hash, data->id, data);
		prev_key = &key;
	}
}

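/* Free the slab_cache_data entries malloc'ed in run_slab_cache_iter(). */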
static void exit_slab_cache_iter(void)
{
	struct hashmap_entry *cur;
	unsigned bkt;

	hashmap__for_each_entry(&slab_hash, cur, bkt)
		free(cur->pvalue);

	hashmap__clear(&slab_hash);
}

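/*
 * Open the skeleton, size its maps according to the aggregation mode and
 * filters, load it, populate the filter maps, and attach the programs.
 * Map sizes must be set before load; filter maps can only be filled
 * afterwards, once their file descriptors exist.
 */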
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1, nslabs = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

	if (target__has_cpu(target)) {
		skel->rodata->has_cpu = 1;
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	}
	if (target__has_task(target)) {
		skel->rodata->has_task = 1;
		ntasks = perf_thread_map__nr(evlist->core.threads);
	}
	if (con->filters->nr_types) {
		skel->rodata->has_type = 1;
		ntypes = con->filters->nr_types;
	}
	if (con->filters->nr_cgrps) {
		skel->rodata->has_cgroup = 1;
		ncgrps = con->filters->nr_cgrps;
	}

	/* resolve lock name filters to addresses */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
		skel->rodata->has_addr = 1;
	}

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	skel->rodata->stack_skip = con->stack_skip;
	skel->rodata->aggr_mode = con->aggr_mode;
	skel->rodata->needs_callstack = con->save_callstack;
	skel->rodata->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP || con->filters->nr_cgrps) {
		if (cgroup_is_v2("perf_event"))
			skel->rodata->use_cgroup_v2 = 1;
	}

	check_slab_cache_iter(con);

	if (con->filters->nr_slabs && has_slab_iter) {
		skel->rodata->has_slab = 1;
		nslabs = con->filters->nr_slabs;
	}

	bpf_map__set_max_entries(skel->maps.slab_filter, nslabs);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

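	/*
	 * The maps exist only after a successful load; now their fds can
	 * be used to populate the requested CPU/task/type/addr/cgroup
	 * filters.
	 */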
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP)
		read_all_cgroups(&con->cgroups);

	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);

	/* run the slab iterator after attaching */
	run_slab_cache_iter();

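	/*
	 * The slab filter map is keyed by cache address, which is only
	 * known after the iterator has run: match each requested cache
	 * name against the collected slab_caches entries.
	 */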
	if (con->filters->nr_slabs) {
		u8 val = 1;
		int cache_fd;
		long key, *prev_key;

		fd = bpf_map__fd(skel->maps.slab_filter);

		/* Walk the slab cache map and mark matching cache addresses */
		cache_fd = bpf_map__fd(skel->maps.slab_caches);
		prev_key = NULL;
		while (!bpf_map_get_next_key(cache_fd, prev_key, &key)) {
			struct slab_cache_data data;

			if (bpf_map_lookup_elem(cache_fd, &key, &data) < 0)
				break;

			for (i = 0; i < con->filters->nr_slabs; i++) {
				if (!strcmp(con->filters->slabs[i], data.name)) {
					bpf_map_update_elem(fd, &key, &val, BPF_ANY);
					break;
				}
			}
			prev_key = &key;
		}
	}

	return 0;
}

/*
 * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end
 * timestamp in ktime, so that deltas can be calculated easily later.
 */
static void mark_end_timestamp(void)
{
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .flags = BPF_F_TEST_RUN_ON_CPU,
	);
	int prog_fd = bpf_program__fd(skel->progs.end_timestamp);

	bpf_prog_test_run_opts(prog_fd, &opts);
}

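/*
 * Fold a still-pending tstamp entry (one that never saw the corresponding
 * contention end event) into the lock_stat map, treating end_ts as its
 * end time.
 */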
static void update_lock_stat(int map_fd, int pid, u64 end_ts,
			     enum lock_aggr_mode aggr_mode,
			     struct tstamp_data *ts_data)
{
	u64 delta;
	struct contention_key stat_key = {};
	struct contention_data stat_data;

	if (ts_data->timestamp >= end_ts)
		return;

	delta = end_ts - ts_data->timestamp;

	switch (aggr_mode) {
	case LOCK_AGGR_CALLER:
		stat_key.stack_id = ts_data->stack_id;
		break;
	case LOCK_AGGR_TASK:
		stat_key.pid = pid;
		break;
	case LOCK_AGGR_ADDR:
		stat_key.lock_addr_or_cgroup = ts_data->lock;
		break;
	case LOCK_AGGR_CGROUP:
		/* TODO */
		return;
	default:
		return;
	}

	if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
		return;

	stat_data.total_time += delta;
	stat_data.count++;

	if (delta > stat_data.max_time)
		stat_data.max_time = delta;
	if (delta < stat_data.min_time)
		stat_data.min_time = delta;

	bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
}

/*
 * Account entries in the tstamp map (which didn't see the corresponding
 * lock:contention_end tracepoint) using end_ts.
 */
static void account_end_timestamp(struct lock_contention *con)
{
	int ts_fd, stat_fd;
	int *prev_key, key;
	u64 end_ts = skel->bss->end_ts;
	int total_cpus;
	enum lock_aggr_mode aggr_mode = con->aggr_mode;
	struct tstamp_data ts_data, *cpu_data;

	/* Iterate per-task tstamp map (key = TID) */
	ts_fd = bpf_map__fd(skel->maps.tstamp);
	stat_fd = bpf_map__fd(skel->maps.lock_stat);

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
			int pid = key;

			if (aggr_mode == LOCK_AGGR_TASK && con->owner)
				pid = ts_data.flags;

			update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
					 &ts_data);
		}

		prev_key = &key;
	}

	/* Now check the per-CPU tstamp map, which has no TID. */
	if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
		return;

	total_cpus = cpu__max_cpu().cpu;
	ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);

	cpu_data = calloc(total_cpus, sizeof(*cpu_data));
	if (cpu_data == NULL)
		return;

	prev_key = NULL;
	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
		if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
			goto next;

		for (int i = 0; i < total_cpus; i++) {
			if (cpu_data[i].lock == 0)
				continue;

			update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
					 &cpu_data[i]);
		}

next:
		prev_key = &key;
	}
	free(cpu_data);
}

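/* Set the global flag checked by the BPF programs to start collection. */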
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

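/*
 * Stop collection and record the end timestamp, so that waits still in
 * flight can be accounted by account_end_timestamp().
 */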
int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	mark_end_timestamp();
	return 0;
}

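/*
 * Resolve a human-readable name for a contention entry: the task comm,
 * a lock symbol or slab cache name, the cgroup name, or the caller
 * symbol+offset, depending on the aggregation mode.
 */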
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);
		struct slab_cache_data *slab_data;

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		/* look up slab_hash for dynamic locks in a slab object */
		if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
			snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
			return name_buf;
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

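/*
 * Drain the BPF lock_stat map into perf's lock_stat list, resolving names
 * and (optionally) call stacks along the way.
 */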
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	account_end_timestamp(con);

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = machine__findnew_thread(machine,
							      /*pid=*/0,
							      /*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
				    .flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	maps__load_first(machine->kmaps);

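	/*
	 * Walk all contention entries; merge into an existing lock_stat
	 * if one exists for the key, otherwise create a new entry with a
	 * resolved name.
	 */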
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace, con->max_stack)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

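/* Tear down: disable collection, destroy the skeleton, free caches. */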
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	exit_slab_cache_iter();

	return 0;
}