// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Google LLC.
 *
 * Based on klockstat from BCC by Jiri Olsa and others
 * 2021-10-26   Barret Rhoden   Created this.
 */
#include "vmlinux.h"
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "klockstat.h"
#include "bits.bpf.h"

const volatile pid_t targ_tgid = 0;
const volatile pid_t targ_pid = 0;
void *const volatile targ_lock = NULL;
const volatile int per_thread = 0;

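/*
 * Call-site stack traces, filled in by bpf_get_stackid() in lock_contended()
 * and keyed by the stack_id it returns.
 */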
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__uint(key_size, sizeof(u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
} stack_map SEC(".maps");

/*
 * Uniquely identifies a task grabbing a particular lock; a task can only hold
 * the same lock once (non-recursive mutexes).
 */
struct task_lock {
	u64 task_id;
	u64 lock_ptr;
};

struct lockholder_info {
	s32 stack_id;
	u64 task_id;
	u64 try_at;
	u64 acq_at;
	u64 rel_at;
	u64 lock_ptr;
};

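/*
 * In-flight acquisitions, keyed by (task, lock). Entries live from the
 * acquisition attempt until the lock is released or the attempt aborts.
 */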
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct task_lock);
	__type(value, struct lockholder_info);
} lockholder_map SEC(".maps");

/*
 * Keyed by stack_id.
 *
 * Multiple call sites may have the same underlying lock, but we only know the
 * stats for a particular stack frame. Multiple tasks may have the same
 * stack frame.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, s32);
	__type(value, struct lock_stat);
} stat_map SEC(".maps");

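/*
 * Scratch space for the kprobe-based hooks: maps a thread's tid to the lock
 * it is currently taking, since kretprobes cannot read entry arguments.
 */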
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, u32);
	__type(value, void *);
} locks SEC(".maps");

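/* Check the packed pid_tgid value against the optional tgid/pid filters. */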
static bool tracing_task(u64 task_id)
{
	u32 tgid = task_id >> 32;
	u32 pid = task_id;

	if (targ_tgid && targ_tgid != tgid)
		return false;
	if (targ_pid && targ_pid != pid)
		return false;
	return true;
}

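/*
 * Start tracking an acquisition attempt: capture the caller's stack and the
 * timestamp of the attempt, keyed by (task, lock).
 */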
static void lock_contended(void *ctx, void *lock)
{
	u64 task_id;
	struct lockholder_info li[1] = {0};
	struct task_lock tl = {};

	if (targ_lock && targ_lock != lock)
		return;
	task_id = bpf_get_current_pid_tgid();
	if (!tracing_task(task_id))
		return;

	li->task_id = task_id;
	li->lock_ptr = (u64)lock;
	/*
	 * Skip 4 frames, e.g.:
	 *	__this_module+0x34ef
	 *	__this_module+0x34ef
	 *	__this_module+0x8c44
	 *	mutex_lock+0x5
	 *
	 * Note: if you make major changes to this bpf program, double check
	 * that you aren't skipping too many frames.
	 */
	li->stack_id = bpf_get_stackid(ctx, &stack_map, 4 | BPF_F_FAST_STACK_CMP);

	/* Legit failures include EEXIST */
	if (li->stack_id < 0)
		return;
	li->try_at = bpf_ktime_get_ns();

	tl.task_id = task_id;
	tl.lock_ptr = (u64)lock;
	bpf_map_update_elem(&lockholder_map, &tl, li, BPF_ANY);
}

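/* The attempt failed (e.g. it was interrupted); stop tracking it. */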
static void lock_aborted(void *lock)
{
	u64 task_id;
	struct task_lock tl = {};

	if (targ_lock && targ_lock != lock)
		return;
	task_id = bpf_get_current_pid_tgid();
	if (!tracing_task(task_id))
		return;
	tl.task_id = task_id;
	tl.lock_ptr = (u64)lock;
	bpf_map_delete_elem(&lockholder_map, &tl);
}

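/* The lock was taken; record when, so hold time can be computed on release. */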
static void lock_acquired(void *lock)
{
	u64 task_id;
	struct lockholder_info *li;
	struct task_lock tl = {};

	if (targ_lock && targ_lock != lock)
		return;
	task_id = bpf_get_current_pid_tgid();
	if (!tracing_task(task_id))
		return;

	tl.task_id = task_id;
	tl.lock_ptr = (u64)lock;
	li = bpf_map_lookup_elem(&lockholder_map, &tl);
	if (!li)
		return;

	li->acq_at = bpf_ktime_get_ns();
}

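/*
 * Fold a completed try->acquire->release cycle into the stats, keyed by
 * stack_id (or by task_id in per-thread mode).
 */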
static void account(struct lockholder_info *li)
{
	struct lock_stat *ls;
	u64 delta;
	u32 key = li->stack_id;

	if (per_thread)
		key = li->task_id;

	/*
	 * Multiple threads may have the same stack_id. Even though we are
	 * holding the lock, dynamically allocated mutexes can have the same
	 * callgraph but represent different locks. Also, a rwsem can be held
	 * by multiple readers at the same time. They will be accounted as
	 * the same lock, which is what we want, but we need to use atomics to
	 * avoid corruption, especially for the total_time variables.
	 * Per-thread accounting is exempt: the task_id key is unique to each
	 * task, so there is no race.
	 */
	ls = bpf_map_lookup_elem(&stat_map, &key);
	if (!ls) {
		struct lock_stat fresh = {0};

		bpf_map_update_elem(&stat_map, &key, &fresh, BPF_ANY);
		ls = bpf_map_lookup_elem(&stat_map, &key);
		if (!ls)
			return;

		if (per_thread)
			bpf_get_current_comm(ls->acq_max_comm, TASK_COMM_LEN);
	}

	delta = li->acq_at - li->try_at;
	__sync_fetch_and_add(&ls->acq_count, 1);
	__sync_fetch_and_add(&ls->acq_total_time, delta);
	if (delta > READ_ONCE(ls->acq_max_time)) {
		WRITE_ONCE(ls->acq_max_time, delta);
		WRITE_ONCE(ls->acq_max_id, li->task_id);
		WRITE_ONCE(ls->acq_max_lock_ptr, li->lock_ptr);
		/*
		 * Potentially racy, if multiple threads think they are the max,
		 * so you may get a clobbered write.
		 */
		if (!per_thread)
			bpf_get_current_comm(ls->acq_max_comm, TASK_COMM_LEN);
	}

	delta = li->rel_at - li->acq_at;
	__sync_fetch_and_add(&ls->hld_count, 1);
	__sync_fetch_and_add(&ls->hld_total_time, delta);
	if (delta > READ_ONCE(ls->hld_max_time)) {
		WRITE_ONCE(ls->hld_max_time, delta);
		WRITE_ONCE(ls->hld_max_id, li->task_id);
		WRITE_ONCE(ls->hld_max_lock_ptr, li->lock_ptr);
		if (!per_thread)
			bpf_get_current_comm(ls->hld_max_comm, TASK_COMM_LEN);
	}
}

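/* Timestamp the release, account the completed cycle, and drop the entry. */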
static void lock_released(void *lock)
{
	u64 task_id;
	struct lockholder_info *li;
	struct task_lock tl = {};

	if (targ_lock && targ_lock != lock)
		return;
	task_id = bpf_get_current_pid_tgid();
	if (!tracing_task(task_id))
		return;
	tl.task_id = task_id;
	tl.lock_ptr = (u64)lock;
	li = bpf_map_lookup_elem(&lockholder_map, &tl);
	if (!li)
		return;

	li->rel_at = bpf_ktime_get_ns();
	account(li);

	bpf_map_delete_elem(&lockholder_map, &tl);
}

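/*
 * fentry/fexit-based hooks. Note: these overlap with the kprobe-based hooks
 * further below; the user-space loader is assumed to enable only one of the
 * two sets at a time.
 */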
SEC("fentry/mutex_lock")
int BPF_PROG(mutex_lock, struct mutex *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/mutex_lock")
int BPF_PROG(mutex_lock_exit, struct mutex *lock, long ret)
{
	lock_acquired(lock);
	return 0;
}

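/*
 * Trylocks never block, so there is no entry hook; on success, record a
 * contended/acquired pair back-to-back (the measured wait time is ~0).
 */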
SEC("fexit/mutex_trylock")
int BPF_PROG(mutex_trylock_exit, struct mutex *lock, long ret)
{
	if (ret) {
		lock_contended(ctx, lock);
		lock_acquired(lock);
	}
	return 0;
}

SEC("fentry/mutex_lock_interruptible")
int BPF_PROG(mutex_lock_interruptible, struct mutex *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/mutex_lock_interruptible")
int BPF_PROG(mutex_lock_interruptible_exit, struct mutex *lock, long ret)
{
	if (ret)
		lock_aborted(lock);
	else
		lock_acquired(lock);
	return 0;
}

SEC("fentry/mutex_lock_killable")
int BPF_PROG(mutex_lock_killable, struct mutex *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/mutex_lock_killable")
int BPF_PROG(mutex_lock_killable_exit, struct mutex *lock, long ret)
{
	if (ret)
		lock_aborted(lock);
	else
		lock_acquired(lock);
	return 0;
}

SEC("fentry/mutex_unlock")
int BPF_PROG(mutex_unlock, struct mutex *lock)
{
	lock_released(lock);
	return 0;
}

SEC("fentry/down_read")
int BPF_PROG(down_read, struct rw_semaphore *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/down_read")
int BPF_PROG(down_read_exit, struct rw_semaphore *lock, long ret)
{
	lock_acquired(lock);
	return 0;
}

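/* down_read_trylock() returns 1 on success, 0 on failure. */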
SEC("fexit/down_read_trylock")
int BPF_PROG(down_read_trylock_exit, struct rw_semaphore *lock, long ret)
{
	if (ret == 1) {
		lock_contended(ctx, lock);
		lock_acquired(lock);
	}
	return 0;
}

SEC("fentry/down_read_interruptible")
int BPF_PROG(down_read_interruptible, struct rw_semaphore *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/down_read_interruptible")
int BPF_PROG(down_read_interruptible_exit, struct rw_semaphore *lock, long ret)
{
	if (ret)
		lock_aborted(lock);
	else
		lock_acquired(lock);
	return 0;
}

SEC("fentry/down_read_killable")
int BPF_PROG(down_read_killable, struct rw_semaphore *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/down_read_killable")
int BPF_PROG(down_read_killable_exit, struct rw_semaphore *lock, long ret)
{
	if (ret)
		lock_aborted(lock);
	else
		lock_acquired(lock);
	return 0;
}

SEC("fentry/up_read")
int BPF_PROG(up_read, struct rw_semaphore *lock)
{
	lock_released(lock);
	return 0;
}

SEC("fentry/down_write")
int BPF_PROG(down_write, struct rw_semaphore *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/down_write")
int BPF_PROG(down_write_exit, struct rw_semaphore *lock, long ret)
{
	lock_acquired(lock);
	return 0;
}

SEC("fexit/down_write_trylock")
int BPF_PROG(down_write_trylock_exit, struct rw_semaphore *lock, long ret)
{
	if (ret == 1) {
		lock_contended(ctx, lock);
		lock_acquired(lock);
	}
	return 0;
}

SEC("fentry/down_write_killable")
int BPF_PROG(down_write_killable, struct rw_semaphore *lock)
{
	lock_contended(ctx, lock);
	return 0;
}

SEC("fexit/down_write_killable")
int BPF_PROG(down_write_killable_exit, struct rw_semaphore *lock, long ret)
{
	if (ret)
		lock_aborted(lock);
	else
		lock_acquired(lock);
	return 0;
}

SEC("fentry/up_write")
int BPF_PROG(up_write, struct rw_semaphore *lock)
{
	lock_released(lock);
	return 0;
}

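/*
 * kprobe/kretprobe variants of the hooks above, presumably for kernels where
 * fentry/fexit attachment is unavailable. A kretprobe cannot read the traced
 * function's arguments, so each entry kprobe stashes the lock pointer in the
 * "locks" map keyed by tid, and the kretprobe looks it up on return.
 */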
SEC("kprobe/mutex_lock")
int BPF_KPROBE(kprobe_mutex_lock, struct mutex *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/mutex_lock")
int BPF_KRETPROBE(kprobe_mutex_lock_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);
	lock_acquired(*lock);
	return 0;
}

SEC("kprobe/mutex_trylock")
int BPF_KPROBE(kprobe_mutex_trylock, struct mutex *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	return 0;
}

SEC("kretprobe/mutex_trylock")
int BPF_KRETPROBE(kprobe_mutex_trylock_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret) {
		lock_contended(ctx, *lock);
		lock_acquired(*lock);
	}
	return 0;
}

SEC("kprobe/mutex_lock_interruptible")
int BPF_KPROBE(kprobe_mutex_lock_interruptible, struct mutex *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/mutex_lock_interruptible")
int BPF_KRETPROBE(kprobe_mutex_lock_interruptible_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/mutex_lock_killable")
int BPF_KPROBE(kprobe_mutex_lock_killable, struct mutex *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/mutex_lock_killable")
int BPF_KRETPROBE(kprobe_mutex_lock_killable_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/mutex_unlock")
int BPF_KPROBE(kprobe_mutex_unlock, struct mutex *lock)
{
	lock_released(lock);
	return 0;
}

SEC("kprobe/down_read")
int BPF_KPROBE(kprobe_down_read, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/down_read")
int BPF_KRETPROBE(kprobe_down_read_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	lock_acquired(*lock);
	return 0;
}

SEC("kprobe/down_read_trylock")
int BPF_KPROBE(kprobe_down_read_trylock, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	return 0;
}

SEC("kretprobe/down_read_trylock")
int BPF_KRETPROBE(kprobe_down_read_trylock_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret == 1) {
		lock_contended(ctx, *lock);
		lock_acquired(*lock);
	}
	return 0;
}

SEC("kprobe/down_read_interruptible")
int BPF_KPROBE(kprobe_down_read_interruptible, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/down_read_interruptible")
int BPF_KRETPROBE(kprobe_down_read_interruptible_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/down_read_killable")
int BPF_KPROBE(kprobe_down_read_killable, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/down_read_killable")
int BPF_KRETPROBE(kprobe_down_read_killable_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/up_read")
int BPF_KPROBE(kprobe_up_read, struct rw_semaphore *lock)
{
	lock_released(lock);
	return 0;
}

SEC("kprobe/down_write")
int BPF_KPROBE(kprobe_down_write, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/down_write")
int BPF_KRETPROBE(kprobe_down_write_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	lock_acquired(*lock);
	return 0;
}

SEC("kprobe/down_write_trylock")
int BPF_KPROBE(kprobe_down_write_trylock, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	return 0;
}

SEC("kretprobe/down_write_trylock")
int BPF_KRETPROBE(kprobe_down_write_trylock_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret == 1) {
		lock_contended(ctx, *lock);
		lock_acquired(*lock);
	}
	return 0;
}

SEC("kprobe/down_write_killable")
int BPF_KPROBE(kprobe_down_write_killable, struct rw_semaphore *lock)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();

	bpf_map_update_elem(&locks, &tid, &lock, BPF_ANY);
	lock_contended(ctx, lock);
	return 0;
}

SEC("kretprobe/down_write_killable")
int BPF_KRETPROBE(kprobe_down_write_killable_exit, long ret)
{
	u32 tid = (u32)bpf_get_current_pid_tgid();
	void **lock;

	lock = bpf_map_lookup_elem(&locks, &tid);
	if (!lock)
		return 0;

	bpf_map_delete_elem(&locks, &tid);

	if (ret)
		lock_aborted(*lock);
	else
		lock_acquired(*lock);
	return 0;
}

SEC("kprobe/up_write")
int BPF_KPROBE(kprobe_up_write, struct rw_semaphore *lock)
{
	lock_released(lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";