1 #![allow(clippy::let_unit_value)]
2 #![warn(clippy::absolute_paths)]
3
4 mod common;
5
6 use std::collections::HashSet;
7 use std::env::current_exe;
8 use std::ffi::c_int;
9 use std::ffi::c_void;
10 use std::ffi::OsStr;
11 use std::fs;
12 use std::hint;
13 use std::io;
14 use std::io::Read;
15 use std::mem::size_of;
16 use std::mem::size_of_val;
17 use std::os::unix::io::AsFd;
18 use std::path::Path;
19 use std::path::PathBuf;
20 use std::ptr;
21 use std::ptr::addr_of;
22 use std::slice;
23 use std::sync::mpsc::channel;
24 use std::time::Duration;
25
26 use libbpf_rs::num_possible_cpus;
27 use libbpf_rs::AsRawLibbpf;
28 use libbpf_rs::Iter;
29 use libbpf_rs::Linker;
30 use libbpf_rs::Map;
31 use libbpf_rs::MapCore;
32 use libbpf_rs::MapFlags;
33 use libbpf_rs::MapHandle;
34 use libbpf_rs::MapInfo;
35 use libbpf_rs::MapType;
36 use libbpf_rs::Object;
37 use libbpf_rs::ObjectBuilder;
38 use libbpf_rs::Program;
39 use libbpf_rs::ProgramInput;
40 use libbpf_rs::ProgramType;
41 use libbpf_rs::TracepointOpts;
42 use libbpf_rs::UprobeOpts;
43 use libbpf_rs::UsdtOpts;
44 use libbpf_rs::UserRingBuffer;
45 use plain::Plain;
46 use probe::probe;
47 use scopeguard::defer;
48 use tempfile::NamedTempFile;
49 use test_tag::tag;
50
51 use crate::common::bump_rlimit_mlock;
52 use crate::common::get_map;
53 use crate::common::get_map_mut;
54 use crate::common::get_prog_mut;
55 use crate::common::get_test_object;
56 use crate::common::get_test_object_path;
57 use crate::common::open_test_object;
58
59
60 /// A helper function that instantiates a `RingBuffer` for `map` with a callback,
61 /// runs `action` (which is expected to trigger a write to the ring buffer from
62 /// kernel space), consumes the buffer in user space, and returns the single
63 /// `i32` read from it.
64 fn with_ringbuffer<F>(map: &Map, action: F) -> i32
65 where
66 F: FnOnce(),
67 {
68 let mut value = 0i32;
69 {
70 let callback = |data: &[u8]| {
71 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
72 0
73 };
74
75 let mut builder = libbpf_rs::RingBufferBuilder::new();
76 builder.add(map, callback).expect("failed to add ringbuf");
77 let mgr = builder.build().expect("failed to build");
78
79 action();
80 mgr.consume().expect("failed to consume ringbuf");
81 }
82
83 value
84 }
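
// A minimal, illustrative usage sketch of `with_ringbuffer` (kept as dead code,
// not a test). It assumes the same "tracepoint.bpf.o" object with its
// "handle__tracepoint" program and "ringbuf" map that the tracepoint tests
// below use: the action triggers the tracepoint, the BPF program writes a
// single `i32` into the ring buffer, and the helper reads it back.
#[allow(dead_code)]
fn with_ringbuffer_usage_sketch() {
    let mut obj = get_test_object("tracepoint.bpf.o");
    let mut prog = get_prog_mut(&mut obj, "handle__tracepoint");
    let _link = prog
        .attach_tracepoint("syscalls", "sys_enter_getpid")
        .expect("failed to attach prog");

    let map = get_map_mut(&mut obj, "ringbuf");
    let value = with_ringbuffer(&map, || {
        let _pid = unsafe { libc::getpid() };
    });
    assert_eq!(value, 1);
}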
85
86 #[tag(root)]
87 #[test]
88 fn test_object_build_and_load() {
89 bump_rlimit_mlock();
90
91 get_test_object("runqslower.bpf.o");
92 }
93
94 #[test]
95 fn test_object_build_from_memory() {
96 let obj_path = get_test_object_path("runqslower.bpf.o");
97 let contents = fs::read(obj_path).expect("failed to read object file");
98 let mut builder = ObjectBuilder::default();
99 let obj = builder
100 .name("memory name")
101 .unwrap()
102 .open_memory(&contents)
103 .expect("failed to build object");
104 let name = obj.name().expect("failed to get object name");
105 assert!(name == "memory name");
106
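// Also exercise the raw-pointer round trip: take ownership of the underlying
// libbpf object pointer and rebuild the object from it; the name set above
// must survive the conversion.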
107 let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
108 let name = obj.name().expect("failed to get object name");
109 assert!(name == "memory name");
110 }
111
112 #[test]
113 fn test_object_build_from_memory_empty_name() {
114 let obj_path = get_test_object_path("runqslower.bpf.o");
115 let contents = fs::read(obj_path).expect("failed to read object file");
116 let mut builder = ObjectBuilder::default();
117 let obj = builder
118 .name("")
119 .unwrap()
120 .open_memory(&contents)
121 .expect("failed to build object");
122 let name = obj.name().expect("failed to get object name");
123 assert!(name.is_empty());
124
125 let obj = unsafe { Object::from_ptr(obj.take_ptr()) };
126 let name = obj.name().expect("failed to get object name");
127 assert!(name.is_empty());
128 }
129
130 /// Check that loading an object from an empty file fails as expected.
131 #[tag(root)]
132 #[test]
133 fn test_object_load_invalid() {
134 let empty_file = NamedTempFile::new().unwrap();
135 let _err = ObjectBuilder::default()
136 .debug(true)
137 .open_file(empty_file.path())
138 .unwrap_err();
139 }
140
141 #[test]
142 fn test_object_name() {
143 let obj_path = get_test_object_path("runqslower.bpf.o");
144 let mut builder = ObjectBuilder::default();
145 builder.name("test name").unwrap();
146 let obj = builder.open_file(obj_path).expect("failed to build object");
147 let obj_name = obj.name().expect("failed to get object name");
148 assert!(obj_name == "test name");
149 }
150
151 #[tag(root)]
152 #[test]
153 fn test_object_maps() {
154 bump_rlimit_mlock();
155
156 let mut obj = get_test_object("runqslower.bpf.o");
157 let _map = get_map_mut(&mut obj, "start");
158 let _map = get_map_mut(&mut obj, "events");
159 assert!(!obj.maps().any(|map| map.name() == OsStr::new("asdf")));
160 }
161
162 #[tag(root)]
163 #[test]
164 fn test_object_maps_iter() {
165 bump_rlimit_mlock();
166
167 let obj = get_test_object("runqslower.bpf.o");
168 for map in obj.maps() {
169 eprintln!("{:?}", map.name());
170 }
171 // This will include .rodata and .bss, so our expected count is 4, not 2
172 assert!(obj.maps().count() == 4);
173 }
174
175 #[tag(root)]
176 #[test]
177 fn test_object_map_key_value_size() {
178 bump_rlimit_mlock();
179
180 let mut obj = get_test_object("runqslower.bpf.o");
181 let start = get_map_mut(&mut obj, "start");
182
183 assert!(start.lookup(&[1, 2, 3, 4, 5], MapFlags::empty()).is_err());
184 assert!(start.delete(&[1]).is_err());
185 assert!(start.lookup_and_delete(&[1, 2, 3, 4, 5]).is_err());
186 assert!(start
187 .update(&[1, 2, 3, 4, 5], &[1], MapFlags::empty())
188 .is_err());
189 }
190
191 #[tag(root)]
192 #[test]
193 fn test_object_map_update_batch() {
194 bump_rlimit_mlock();
195
196 let mut obj = get_test_object("runqslower.bpf.o");
197 let start = get_map_mut(&mut obj, "start");
198
199 let key1 = 1u32.to_ne_bytes();
200 let key2 = 2u32.to_ne_bytes();
201 let key3 = 3u32.to_ne_bytes();
202 let key4 = 4u32.to_ne_bytes();
203
204 let value1 = 369u64.to_ne_bytes();
205 let value2 = 258u64.to_ne_bytes();
206 let value3 = 147u64.to_ne_bytes();
207 let value4 = 159u64.to_ne_bytes();
208
209 let batch_key1 = key1.into_iter().chain(key2).collect::<Vec<_>>();
210 let batch_value1 = value1.into_iter().chain(value2).collect::<Vec<_>>();
211
212 let batch_key2 = key2.into_iter().chain(key3).chain(key4).collect::<Vec<_>>();
213 let batch_value2 = value2
214 .into_iter()
215 .chain(value3)
216 .chain(value4)
217 .collect::<Vec<_>>();
218
219 // Update batch with wrong key size
220 assert!(start
221 .update_batch(
222 &[1, 2, 3],
223 &batch_value1,
224 2,
225 MapFlags::ANY,
226 MapFlags::NO_EXIST
227 )
228 .is_err());
229
230 // Update batch with wrong value size
231 assert!(start
232 .update_batch(
233 &batch_key1,
234 &[1, 2, 3],
235 2,
236 MapFlags::ANY,
237 MapFlags::NO_EXIST
238 )
239 .is_err());
240
241 // Update batch with wrong count.
242 assert!(start
243 .update_batch(
244 &batch_key1,
245 &batch_value1,
246 1,
247 MapFlags::ANY,
248 MapFlags::NO_EXIST
249 )
250 .is_err());
251
252 // Update batch with 1 key.
253 assert!(start
254 .update_batch(&key1, &value1, 1, MapFlags::ANY, MapFlags::NO_EXIST)
255 .is_ok());
256
257 // Update batch with multiple keys.
258 assert!(start
259 .update_batch(
260 &batch_key2,
261 &batch_value2,
262 3,
263 MapFlags::ANY,
264 MapFlags::NO_EXIST
265 )
266 .is_ok());
267
268 // Update batch with existing keys.
269 assert!(start
270 .update_batch(
271 &batch_key2,
272 &batch_value2,
273 3,
274 MapFlags::NO_EXIST,
275 MapFlags::NO_EXIST
276 )
277 .is_err());
278 }
279
280 #[tag(root)]
281 #[test]
282 fn test_object_map_delete_batch() {
283 bump_rlimit_mlock();
284
285 let mut obj = get_test_object("runqslower.bpf.o");
286 let start = get_map_mut(&mut obj, "start");
287
288 let key1 = 1u32.to_ne_bytes();
289 assert!(start
290 .update(&key1, &9999u64.to_ne_bytes(), MapFlags::ANY)
291 .is_ok());
292 let key2 = 2u32.to_ne_bytes();
293 assert!(start
294 .update(&key2, &42u64.to_ne_bytes(), MapFlags::ANY)
295 .is_ok());
296 let key3 = 3u32.to_ne_bytes();
297 assert!(start
298 .update(&key3, &18u64.to_ne_bytes(), MapFlags::ANY)
299 .is_ok());
300 let key4 = 4u32.to_ne_bytes();
301 assert!(start
302 .update(&key4, &1337u64.to_ne_bytes(), MapFlags::ANY)
303 .is_ok());
304
305 // Delete 1 incomplete key.
306 assert!(start
307 .delete_batch(&[0, 0, 1], 1, MapFlags::empty(), MapFlags::empty())
308 .is_err());
309 // Delete keys with wrong count.
310 assert!(start
311 .delete_batch(&key4, 2, MapFlags::empty(), MapFlags::empty())
312 .is_err());
313 // Delete 1 key successfully.
314 assert!(start
315 .delete_batch(&key4, 1, MapFlags::empty(), MapFlags::empty())
316 .is_ok());
317 // Delete remaining 3 keys.
318 let keys = key1.into_iter().chain(key2).chain(key3).collect::<Vec<_>>();
319 assert!(start
320 .delete_batch(&keys, 3, MapFlags::empty(), MapFlags::empty())
321 .is_ok());
322 // Map should be empty now.
323 assert!(start.keys().collect::<Vec<_>>().is_empty())
324 }
325
326 /// Test whether `MapInfo` works properly
327 #[tag(root)]
328 #[test]
329 pub fn test_map_info() {
330 #[allow(clippy::needless_update)]
331 let opts = libbpf_sys::bpf_map_create_opts {
332 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
333 map_flags: libbpf_sys::BPF_ANY,
334 btf_fd: 0,
335 btf_key_type_id: 0,
336 btf_value_type_id: 0,
337 btf_vmlinux_value_type_id: 0,
338 inner_map_fd: 0,
339 map_extra: 0,
340 numa_node: 0,
341 map_ifindex: 0,
342 // bpf_map_create_opts might have padding fields on some platforms
343 ..Default::default()
344 };
345
346 let map = MapHandle::create(MapType::Hash, Some("simple_map"), 8, 64, 1024, &opts).unwrap();
347 let map_info = MapInfo::new(map.as_fd()).unwrap();
348 let name_received = map_info.name().unwrap();
349 assert_eq!(name_received, "simple_map");
350 assert_eq!(map_info.map_type(), MapType::Hash);
351 assert_eq!(map_info.flags() & MapFlags::ANY, MapFlags::ANY);
352
353 let map_info = &map_info.info;
354 assert_eq!(map_info.key_size, 8);
355 assert_eq!(map_info.value_size, 64);
356 assert_eq!(map_info.max_entries, 1024);
357 assert_eq!(map_info.btf_id, 0);
358 assert_eq!(map_info.btf_key_type_id, 0);
359 assert_eq!(map_info.btf_value_type_id, 0);
360 assert_eq!(map_info.btf_vmlinux_value_type_id, 0);
361 assert_eq!(map_info.map_extra, 0);
362 assert_eq!(map_info.ifindex, 0);
363 }
364
365 #[tag(root)]
366 #[test]
367 fn test_object_percpu_lookup() {
368 bump_rlimit_mlock();
369
370 let mut obj = get_test_object("percpu_map.bpf.o");
371 let map = get_map_mut(&mut obj, "percpu_map");
372 let res = map
373 .lookup_percpu(&(0_u32).to_ne_bytes(), MapFlags::ANY)
374 .expect("failed to lookup")
375 .expect("failed to find value for key");
376
377 assert_eq!(
378 res.len(),
379 num_possible_cpus().expect("must be one value per cpu")
380 );
381 assert_eq!(res[0].len(), size_of::<u32>());
382 }
383
384 #[tag(root)]
385 #[test]
386 fn test_object_percpu_invalid_lookup_fn() {
387 bump_rlimit_mlock();
388
389 let mut obj = get_test_object("percpu_map.bpf.o");
390 let map = get_map_mut(&mut obj, "percpu_map");
391
392 assert!(map.lookup(&(0_u32).to_ne_bytes(), MapFlags::ANY).is_err());
393 }
394
395 #[tag(root)]
396 #[test]
397 fn test_object_percpu_update() {
398 bump_rlimit_mlock();
399
400 let mut obj = get_test_object("percpu_map.bpf.o");
401 let map = get_map_mut(&mut obj, "percpu_map");
402 let key = (0_u32).to_ne_bytes();
403
404 let mut vals: Vec<Vec<u8>> = Vec::new();
405 for i in 0..num_possible_cpus().unwrap() {
406 vals.push((i as u32).to_ne_bytes().to_vec());
407 }
408
409 map.update_percpu(&key, &vals, MapFlags::ANY)
410 .expect("failed to update map");
411
412 let res = map
413 .lookup_percpu(&key, MapFlags::ANY)
414 .expect("failed to lookup")
415 .expect("failed to find value for key");
416
417 assert_eq!(vals, res);
418 }
419
420 #[tag(root)]
421 #[test]
422 fn test_object_percpu_invalid_update_fn() {
423 bump_rlimit_mlock();
424
425 let mut obj = get_test_object("percpu_map.bpf.o");
426 let map = get_map_mut(&mut obj, "percpu_map");
427 let key = (0_u32).to_ne_bytes();
428
429 let val = (1_u32).to_ne_bytes().to_vec();
430
431 assert!(map.update(&key, &val, MapFlags::ANY).is_err());
432 }
433
434 #[tag(root)]
435 #[test]
436 fn test_object_percpu_lookup_update() {
437 bump_rlimit_mlock();
438
439 let mut obj = get_test_object("percpu_map.bpf.o");
440 let map = get_map_mut(&mut obj, "percpu_map");
441 let key = (0_u32).to_ne_bytes();
442
443 let mut res = map
444 .lookup_percpu(&key, MapFlags::ANY)
445 .expect("failed to lookup")
446 .expect("failed to find value for key");
447
448 for e in res.iter_mut() {
449 e[0] &= 0xf0;
450 }
451
452 map.update_percpu(&key, &res, MapFlags::ANY)
453 .expect("failed to update after first lookup");
454
455 let res2 = map
456 .lookup_percpu(&key, MapFlags::ANY)
457 .expect("failed to lookup")
458 .expect("failed to find value for key");
459
460 assert_eq!(res, res2);
461 }
462
463 #[tag(root)]
464 #[test]
465 fn test_object_map_empty_lookup() {
466 bump_rlimit_mlock();
467
468 let mut obj = get_test_object("runqslower.bpf.o");
469 let start = get_map_mut(&mut obj, "start");
470
471 assert!(start
472 .lookup(&[1, 2, 3, 4], MapFlags::empty())
473 .expect("err in map lookup")
474 .is_none());
475 }
476
477 /// Test CRUD operations on map of type queue.
478 #[tag(root)]
479 #[test]
480 fn test_object_map_queue_crud() {
481 bump_rlimit_mlock();
482
483 let mut obj = get_test_object("tracepoint.bpf.o");
484 let queue = get_map_mut(&mut obj, "queue");
485
486 let key: [u8; 0] = [];
487 let value1 = 42u32.to_ne_bytes();
488 let value2 = 43u32.to_ne_bytes();
489
490 // Test queue, FIFO expected
491 queue
492 .update(&key, &value1, MapFlags::ANY)
493 .expect("failed to update in queue");
494 queue
495 .update(&key, &value2, MapFlags::ANY)
496 .expect("failed to update in queue");
497
498 let mut val = queue
499 .lookup(&key, MapFlags::ANY)
500 .expect("failed to peek the queue")
501 .expect("failed to retrieve value");
502 assert_eq!(val.len(), 4);
503 assert_eq!(&val, &value1);
504
505 val = queue
506 .lookup_and_delete(&key)
507 .expect("failed to pop from queue")
508 .expect("failed to retrieve value");
509 assert_eq!(val.len(), 4);
510 assert_eq!(&val, &value1);
511
512 val = queue
513 .lookup_and_delete(&key)
514 .expect("failed to pop from queue")
515 .expect("failed to retrieve value");
516 assert_eq!(val.len(), 4);
517 assert_eq!(&val, &value2);
518
519 assert!(queue
520 .lookup_and_delete(&key)
521 .expect("failed to pop from queue")
522 .is_none());
523 }
524
525 /// Test CRUD operations on map of type bloomfilter.
526 #[tag(root)]
527 #[test]
528 fn test_object_map_bloom_filter_crud() {
529 bump_rlimit_mlock();
530
531 let mut obj = get_test_object("tracepoint.bpf.o");
532 let bloom_filter = get_map_mut(&mut obj, "bloom_filter");
533
534 let key: [u8; 0] = [];
535 let value1 = 1337u32.to_ne_bytes();
536 let value2 = 2674u32.to_ne_bytes();
537
538 bloom_filter
539 .update(&key, &value1, MapFlags::ANY)
540 .expect("failed to add entry value1 to bloom filter");
541
542 bloom_filter
543 .update(&key, &value2, MapFlags::ANY)
544 .expect("failed to add entry value2 in bloom filter");
545
546 // Non-empty keys should result in an error
547 bloom_filter
548 .update(&value1, &value1, MapFlags::ANY)
549 .expect_err("Non empty key should return an error");
550
551 for inserted_value in [value1, value2] {
552 let val = bloom_filter
553 .lookup_bloom_filter(&inserted_value)
554 .expect("failed to retrieve item from bloom filter");
555
556 assert!(val);
557 }
558 // Test non existing element
559 let enoent_found = bloom_filter
560 .lookup_bloom_filter(&[1, 2, 3, 4])
561 .expect("failed to retrieve item from bloom filter");
562
563 assert!(!enoent_found);
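// Note: a Bloom filter may report false positives but never false negatives,
// so the lookups for value1 and value2 above are guaranteed to succeed, while
// this negative check relies on the false-positive probability being
// negligible here.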
564
565 // Calling lookup should result in an error
566 bloom_filter
567 .lookup(&[1, 2, 3, 4], MapFlags::ANY)
568 .expect_err("lookup should fail since we should use lookup_bloom_filter");
569
570 // Deleting should not be possible
571 bloom_filter
572 .lookup_and_delete(&key)
573 .expect_err("Expect delete to fail");
574 }
575
576 /// Test CRUD operations on map of type stack.
577 #[tag(root)]
578 #[test]
579 fn test_object_map_stack_crud() {
580 bump_rlimit_mlock();
581
582 let mut obj = get_test_object("tracepoint.bpf.o");
583 let stack = get_map_mut(&mut obj, "stack");
584
585 let key: [u8; 0] = [];
586 let value1 = 1337u32.to_ne_bytes();
587 let value2 = 2674u32.to_ne_bytes();
588
589 stack
590 .update(&key, &value1, MapFlags::ANY)
591 .expect("failed to update in stack");
592 stack
593 .update(&key, &value2, MapFlags::ANY)
594 .expect("failed to update in stack");
595
596 let mut val = stack
597 .lookup(&key, MapFlags::ANY)
598 .expect("failed to pop from stack")
599 .expect("failed to retrieve value");
600
601 assert_eq!(val.len(), 4);
602 assert_eq!(&val, &value2);
603
604 val = stack
605 .lookup_and_delete(&key)
606 .expect("failed to pop from stack")
607 .expect("failed to retrieve value");
608 assert_eq!(val.len(), 4);
609 assert_eq!(&val, &value2);
610
611 val = stack
612 .lookup_and_delete(&key)
613 .expect("failed to pop from stack")
614 .expect("failed to retrieve value");
615 assert_eq!(val.len(), 4);
616 assert_eq!(&val, &value1);
617
618 assert!(stack
619 .lookup_and_delete(&key)
620 .expect("failed to pop from stack")
621 .is_none());
622 }
623
624 #[tag(root)]
625 #[test]
626 fn test_object_map_mutation() {
627 bump_rlimit_mlock();
628
629 let mut obj = get_test_object("runqslower.bpf.o");
630 let start = get_map_mut(&mut obj, "start");
631 start
632 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
633 .expect("failed to write");
634 let val = start
635 .lookup(&[1, 2, 3, 4], MapFlags::empty())
636 .expect("failed to read map")
637 .expect("failed to find key");
638 assert_eq!(val.len(), 8);
639 assert_eq!(val, &[1, 2, 3, 4, 5, 6, 7, 8]);
640
641 start.delete(&[1, 2, 3, 4]).expect("failed to delete key");
642
643 assert!(start
644 .lookup(&[1, 2, 3, 4], MapFlags::empty())
645 .expect("failed to read map")
646 .is_none());
647 }
648
649 #[tag(root)]
650 #[test]
651 fn test_object_map_lookup_flags() {
652 bump_rlimit_mlock();
653
654 let mut obj = get_test_object("runqslower.bpf.o");
655 let start = get_map_mut(&mut obj, "start");
656 start
657 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
658 .expect("failed to write");
659 assert!(start
660 .update(&[1, 2, 3, 4], &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::NO_EXIST)
661 .is_err());
662 }
663
664 #[tag(root)]
665 #[test]
666 fn test_object_map_key_iter() {
667 bump_rlimit_mlock();
668
669 let mut obj = get_test_object("runqslower.bpf.o");
670 let start = get_map_mut(&mut obj, "start");
671
672 let key1 = vec![1, 2, 3, 4];
673 let key2 = vec![1, 2, 3, 5];
674 let key3 = vec![1, 2, 3, 6];
675
676 start
677 .update(&key1, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
678 .expect("failed to write");
679 start
680 .update(&key2, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
681 .expect("failed to write");
682 start
683 .update(&key3, &[1, 2, 3, 4, 5, 6, 7, 8], MapFlags::empty())
684 .expect("failed to write");
685
686 let mut keys = HashSet::new();
687 for key in start.keys() {
688 keys.insert(key);
689 }
690 assert_eq!(keys.len(), 3);
691 assert!(keys.contains(&key1));
692 assert!(keys.contains(&key2));
693 assert!(keys.contains(&key3));
694 }
695
696 #[tag(root)]
697 #[test]
698 fn test_object_map_key_iter_empty() {
699 bump_rlimit_mlock();
700
701 let mut obj = get_test_object("runqslower.bpf.o");
702 let start = get_map_mut(&mut obj, "start");
703 let mut count = 0;
704 for _ in start.keys() {
705 count += 1;
706 }
707 assert_eq!(count, 0);
708 }
709
710 #[tag(root)]
711 #[test]
712 fn test_object_map_pin() {
713 bump_rlimit_mlock();
714
715 let mut obj = get_test_object("runqslower.bpf.o");
716 let mut map = get_map_mut(&mut obj, "start");
717 let path = "/sys/fs/bpf/mymap_test_object_map_pin";
718
719 // Unpinning an unpinned map should be an error
720 assert!(map.unpin(path).is_err());
721 assert!(!Path::new(path).exists());
722
723 // Pin and unpin should be successful
724 map.pin(path).expect("failed to pin map");
725 assert!(Path::new(path).exists());
726 map.unpin(path).expect("failed to unpin map");
727 assert!(!Path::new(path).exists());
728 }
729
730 #[tag(root)]
731 #[test]
732 fn test_object_loading_pinned_map_from_path() {
733 bump_rlimit_mlock();
734
735 let mut obj = get_test_object("runqslower.bpf.o");
736 let mut map = get_map_mut(&mut obj, "start");
737 let path = "/sys/fs/bpf/mymap_test_pin_to_load_from_path";
738
739 map.pin(path).expect("pinning map failed");
740
741 let pinned_map = MapHandle::from_pinned_path(path).expect("loading a map from a path failed");
742 map.unpin(path).expect("unpinning map failed");
743
744 assert_eq!(map.name(), pinned_map.name());
745 assert_eq!(
746 map.info().unwrap().info.id,
747 pinned_map.info().unwrap().info.id
748 );
749 }
750
751 #[tag(root)]
752 #[test]
753 fn test_object_loading_loaded_map_from_id() {
754 bump_rlimit_mlock();
755
756 let mut obj = get_test_object("runqslower.bpf.o");
757 let map = get_map_mut(&mut obj, "start");
758 let id = map.info().expect("to get info from map 'start'").info.id;
759
760 let map_by_id = MapHandle::from_map_id(id).expect("map to load from id");
761
762 assert_eq!(map.name(), map_by_id.name());
763 assert_eq!(
764 map.info().unwrap().info.id,
765 map_by_id.info().unwrap().info.id
766 );
767 }
768
769 #[tag(root)]
770 #[test]
771 fn test_object_programs() {
772 bump_rlimit_mlock();
773
774 let mut obj = get_test_object("runqslower.bpf.o");
775 let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
776 let _prog = get_prog_mut(&mut obj, "handle__sched_wakeup_new");
777 let _prog = get_prog_mut(&mut obj, "handle__sched_switch");
778 assert!(!obj.progs().any(|prog| prog.name() == OsStr::new("asdf")));
779 }
780
781 #[tag(root)]
782 #[test]
783 fn test_object_programs_iter_mut() {
784 bump_rlimit_mlock();
785
786 let obj = get_test_object("runqslower.bpf.o");
787 assert!(obj.progs().count() == 3);
788 }
789
790 #[tag(root)]
791 #[test]
792 fn test_object_program_pin() {
793 bump_rlimit_mlock();
794
795 let mut obj = get_test_object("runqslower.bpf.o");
796 let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
797 let path = "/sys/fs/bpf/myprog";
798
799 // Unpinning an unpinned prog should be an error
800 assert!(prog.unpin(path).is_err());
801 assert!(!Path::new(path).exists());
802
803 // Pin should be successful
804 prog.pin(path).expect("failed to pin prog");
805 assert!(Path::new(path).exists());
806
807 // Backup cleanup in case the test errors out early
808 defer! {
809 let _ = fs::remove_file(path);
810 }
811
812 // Unpin should be successful
813 prog.unpin(path).expect("failed to unpin prog");
814 assert!(!Path::new(path).exists());
815 }
816
817 #[tag(root)]
818 #[test]
819 fn test_object_link_pin() {
820 bump_rlimit_mlock();
821
822 let mut obj = get_test_object("runqslower.bpf.o");
823 let mut prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
824 let mut link = prog.attach().expect("failed to attach prog");
825
826 let path = "/sys/fs/bpf/mylink";
827
828 // Unpinning an unpinned link should be an error
829 assert!(link.unpin().is_err());
830 assert!(!Path::new(path).exists());
831
832 // Pin should be successful
833 link.pin(path).expect("failed to pin link");
834 assert!(Path::new(path).exists());
835
836 // Backup cleanup in case the test errors out early
837 defer! {
838 let _ = fs::remove_file(path);
839 }
840
841 // Unpin should be successful
842 link.unpin().expect("failed to unpin link");
843 assert!(!Path::new(path).exists());
844 }
845
846 #[tag(root)]
847 #[test]
848 fn test_object_reuse_pined_map() {
849 bump_rlimit_mlock();
850
851 let path = "/sys/fs/bpf/mymap_test_object_reuse_pined_map";
852 let key = vec![1, 2, 3, 4];
853 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
854
855 // Pin a map
856 {
857 let mut obj = get_test_object("runqslower.bpf.o");
858 let mut map = get_map_mut(&mut obj, "start");
859 map.update(&key, &val, MapFlags::empty())
860 .expect("failed to write");
861
862 // Pin map
863 map.pin(path).expect("failed to pin map");
864 assert!(Path::new(path).exists());
865 }
866
867 // Backup cleanup in case the test errors out somewhere along the way
868 defer! {
869 let _ = fs::remove_file(path);
870 }
871
872 // Reuse the pinned map
873 let obj_path = get_test_object_path("runqslower.bpf.o");
874 let mut builder = ObjectBuilder::default();
875 builder.debug(true);
876 let mut open_obj = builder.open_file(obj_path).expect("failed to open object");
877 let mut start = open_obj
878 .maps_mut()
879 .find(|map| map.name() == OsStr::new("start"))
880 .expect("failed to find `start` map");
881 assert!(start.reuse_pinned_map("/asdf").is_err());
882 start.reuse_pinned_map(path).expect("failed to reuse map");
883
884 let mut obj = open_obj.load().expect("failed to load object");
885 let mut reused_map = get_map_mut(&mut obj, "start");
886 let found_val = reused_map
887 .lookup(&key, MapFlags::empty())
888 .expect("failed to read map")
889 .expect("failed to find key");
890 assert_eq!(&found_val, &val);
891
892 // Cleanup
893 reused_map.unpin(path).expect("failed to unpin map");
894 assert!(!Path::new(path).exists());
895 }
896
897 #[tag(root)]
898 #[test]
899 fn test_object_ringbuf_raw() {
900 bump_rlimit_mlock();
901
902 let mut obj = get_test_object("ringbuf.bpf.o");
903 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
904 let _link = prog.attach().expect("failed to attach prog");
905
906 static mut V1: i32 = 0;
907 static mut V2: i32 = 0;
908
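// The callbacks below are plain `fn`s rather than closures, so they hand the
// received values back through the statics above. The unsynchronized access is
// fine here because consume_raw()/poll_raw() invoke the callbacks on this
// test's own thread.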
909 fn callback1(data: &[u8]) -> i32 {
910 let mut value: i32 = 0;
911 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
912
913 unsafe {
914 V1 = value;
915 }
916
917 0
918 }
919
920 fn callback2(data: &[u8]) -> i32 {
921 let mut value: i32 = 0;
922 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
923
924 unsafe {
925 V2 = value;
926 }
927
928 0
929 }
930
931 // Test trying to build without adding any ringbufs
932 // Can't use expect_err here since RingBuffer does not implement Debug
933 let builder = libbpf_rs::RingBufferBuilder::new();
934 assert!(
935 builder.build().is_err(),
936 "Should not be able to build without adding at least one ringbuf"
937 );
938
939 // Test building with multiple map objects
940 let mut builder = libbpf_rs::RingBufferBuilder::new();
941
942 // Add a first map and callback
943 let map1 = get_map(&obj, "ringbuf1");
944 builder
945 .add(&map1, callback1)
946 .expect("failed to add ringbuf");
947
948 // Add a second map and callback
949 let map2 = get_map(&obj, "ringbuf2");
950 builder
951 .add(&map2, callback2)
952 .expect("failed to add ringbuf");
953
954 let mgr = builder.build().expect("failed to build");
955
956 // Call getpid to ensure the BPF program runs
957 unsafe { libc::getpid() };
958
959 // Test raw primitives
960 let ret = mgr.consume_raw();
961
962 // We can't check for exact return values, since other tasks in the system may call getpid(),
963 // triggering the BPF program
964 assert!(ret >= 2);
965
966 unsafe { assert_eq!(V1, 1) };
967 unsafe { assert_eq!(V2, 2) };
968
969 // Consume from a (potentially) empty ring buffer
970 let ret = mgr.consume_raw();
971 assert!(ret >= 0);
972
973 // Consume from a (potentially) empty ring buffer using poll()
974 let ret = mgr.poll_raw(Duration::from_millis(100));
975 assert!(ret >= 0);
976 }
977
978 #[tag(root)]
979 #[test]
980 fn test_object_ringbuf_err_callback() {
981 bump_rlimit_mlock();
982
983 let mut obj = get_test_object("ringbuf.bpf.o");
984 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
985 let _link = prog.attach().expect("failed to attach prog");
986
987 // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw() caller
988 fn callback1(_data: &[u8]) -> i32 {
989 -libc::ENOENT
990 }
991
992 // Immediately trigger an error that should be reported back to the consume_raw() or poll_raw() caller
993 fn callback2(_data: &[u8]) -> i32 {
994 -libc::EPERM
995 }
996
997 // Test trying to build without adding any ringbufs
998 // Can't use expect_err here since RingBuffer does not implement Debug
999 let builder = libbpf_rs::RingBufferBuilder::new();
1000 assert!(
1001 builder.build().is_err(),
1002 "Should not be able to build without adding at least one ringbuf"
1003 );
1004
1005 // Test building with multiple map objects
1006 let mut builder = libbpf_rs::RingBufferBuilder::new();
1007
1008 // Add a first map and callback
1009 let map1 = get_map(&obj, "ringbuf1");
1010 builder
1011 .add(&map1, callback1)
1012 .expect("failed to add ringbuf");
1013
1014 // Add a second map and callback
1015 let map2 = get_map(&obj, "ringbuf2");
1016 builder
1017 .add(&map2, callback2)
1018 .expect("failed to add ringbuf");
1019
1020 let mgr = builder.build().expect("failed to build");
1021
1022 // Call getpid to ensure the BPF program runs
1023 unsafe { libc::getpid() };
1024
1025 // Test raw primitives
1026 let ret = mgr.consume_raw();
1027
1028 // The error originating from whichever callback ran first should be reported here, either
1029 // from callback1() or callback2()
1030 assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
1031
1032 unsafe { libc::getpid() };
1033
1034 // The same behavior should happen with poll_raw()
1035 let ret = mgr.poll_raw(Duration::from_millis(100));
1036
1037 assert!(ret == -libc::ENOENT || ret == -libc::EPERM);
1038 }
1039
1040 #[tag(root)]
1041 #[test]
1042 fn test_object_ringbuf() {
1043 bump_rlimit_mlock();
1044
1045 let mut obj = get_test_object("ringbuf.bpf.o");
1046 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1047 let _link = prog.attach().expect("failed to attach prog");
1048
1049 static mut V1: i32 = 0;
1050 static mut V2: i32 = 0;
1051
1052 fn callback1(data: &[u8]) -> i32 {
1053 let mut value: i32 = 0;
1054 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1055
1056 unsafe {
1057 V1 = value;
1058 }
1059
1060 0
1061 }
1062
1063 fn callback2(data: &[u8]) -> i32 {
1064 let mut value: i32 = 0;
1065 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1066
1067 unsafe {
1068 V2 = value;
1069 }
1070
1071 0
1072 }
1073
1074 // Test trying to build without adding any ringbufs
1075 // Can't use expect_err here since RingBuffer does not implement Debug
1076 let builder = libbpf_rs::RingBufferBuilder::new();
1077 assert!(
1078 builder.build().is_err(),
1079 "Should not be able to build without adding at least one ringbuf"
1080 );
1081
1082 // Test building with multiple map objects
1083 let mut builder = libbpf_rs::RingBufferBuilder::new();
1084
1085 // Add a first map and callback
1086 let map1 = get_map(&obj, "ringbuf1");
1087 builder
1088 .add(&map1, callback1)
1089 .expect("failed to add ringbuf");
1090
1091 // Add a second map and callback
1092 let map2 = get_map(&obj, "ringbuf2");
1093 builder
1094 .add(&map2, callback2)
1095 .expect("failed to add ringbuf");
1096
1097 let mgr = builder.build().expect("failed to build");
1098
1099 // Call getpid to ensure the BPF program runs
1100 unsafe { libc::getpid() };
1101
1102 // This should result in both callbacks being called
1103 mgr.consume().expect("failed to consume ringbuf");
1104
1105 // Our values should both reflect that the callbacks have been called
1106 unsafe { assert_eq!(V1, 1) };
1107 unsafe { assert_eq!(V2, 2) };
1108
1109 // Reset both values
1110 unsafe { V1 = 0 };
1111 unsafe { V2 = 0 };
1112
1113 // Call getpid to ensure the BPF program runs
1114 unsafe { libc::getpid() };
1115
1116 // This should result in both callbacks being called
1117 mgr.poll(Duration::from_millis(100))
1118 .expect("failed to poll ringbuf");
1119
1120 // Our values should both reflect that the callbacks have been called
1121 unsafe { assert_eq!(V1, 1) };
1122 unsafe { assert_eq!(V2, 2) };
1123 }
1124
1125 #[tag(root)]
1126 #[test]
1127 fn test_object_ringbuf_closure() {
1128 bump_rlimit_mlock();
1129
1130 let mut obj = get_test_object("ringbuf.bpf.o");
1131 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1132 let _link = prog.attach().expect("failed to attach prog");
1133
1134 let (sender1, receiver1) = channel();
1135 let callback1 = move |data: &[u8]| -> i32 {
1136 let mut value: i32 = 0;
1137 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1138
1139 sender1.send(value).expect("failed to send value");
1140
1141 0
1142 };
1143
1144 let (sender2, receiver2) = channel();
1145 let callback2 = move |data: &[u8]| -> i32 {
1146 let mut value: i32 = 0;
1147 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1148
1149 sender2.send(value).expect("failed to send value");
1150
1151 0
1152 };
1153
1154 // Test trying to build without adding any ringbufs
1155 // Can't use expect_err here since RingBuffer does not implement Debug
1156 let builder = libbpf_rs::RingBufferBuilder::new();
1157 assert!(
1158 builder.build().is_err(),
1159 "Should not be able to build without adding at least one ringbuf"
1160 );
1161
1162 // Test building with multiple map objects
1163 let mut builder = libbpf_rs::RingBufferBuilder::new();
1164
1165 // Add a first map and callback
1166 let map1 = get_map(&obj, "ringbuf1");
1167 builder
1168 .add(&map1, callback1)
1169 .expect("failed to add ringbuf");
1170
1171 // Add a second map and callback
1172 let map2 = get_map(&obj, "ringbuf2");
1173 builder
1174 .add(&map2, callback2)
1175 .expect("failed to add ringbuf");
1176
1177 let mgr = builder.build().expect("failed to build");
1178
1179 // Call getpid to ensure the BPF program runs
1180 unsafe { libc::getpid() };
1181
1182 // This should result in both callbacks being called
1183 mgr.consume().expect("failed to consume ringbuf");
1184
1185 let v1 = receiver1.recv().expect("failed to receive value");
1186 let v2 = receiver2.recv().expect("failed to receive value");
1187
1188 assert_eq!(v1, 1);
1189 assert_eq!(v2, 2);
1190 }
1191
1192 /// Check that `RingBuffer` works correctly even if the map file descriptors
1193 /// provided during construction are closed. This test validates that `libbpf`'s
1194 /// refcount behavior is correctly reflected in our `RingBuffer` lifetimes.
1195 #[tag(root)]
1196 #[test]
1197 fn test_object_ringbuf_with_closed_map() {
1198 bump_rlimit_mlock();
1199
1200 fn test(poll_fn: impl FnOnce(&libbpf_rs::RingBuffer)) {
1201 let mut value = 0i32;
1202
1203 {
1204 let mut obj = get_test_object("tracepoint.bpf.o");
1205 let mut prog = get_prog_mut(&mut obj, "handle__tracepoint");
1206 let _link = prog
1207 .attach_tracepoint("syscalls", "sys_enter_getpid")
1208 .expect("failed to attach prog");
1209
1210 let map = get_map_mut(&mut obj, "ringbuf");
1211
1212 let callback = |data: &[u8]| {
1213 plain::copy_from_bytes(&mut value, data).expect("Wrong size");
1214 0
1215 };
1216
1217 let mut builder = libbpf_rs::RingBufferBuilder::new();
1218 builder.add(&map, callback).expect("failed to add ringbuf");
1219 let ringbuf = builder.build().expect("failed to build");
1220
1221 drop(obj);
1222
1223 // Trigger the tracepoint. At this point `map` along with the containing
1224 // `obj` have been destroyed.
1225 let _pid = unsafe { libc::getpid() };
1226 let () = poll_fn(&ringbuf);
1227 }
1228
1229 // If we see a 1 here the ring buffer was still working as expected.
1230 assert_eq!(value, 1);
1231 }
1232
1233 test(|ringbuf| ringbuf.consume().expect("failed to consume ringbuf"));
1234 test(|ringbuf| {
1235 ringbuf
1236 .poll(Duration::from_secs(5))
1237 .expect("failed to poll ringbuf")
1238 });
1239 }
1240
1241 #[tag(root)]
1242 #[test]
1243 fn test_object_user_ringbuf() {
1244 #[repr(C)]
1245 struct MyStruct {
1246 key: u32,
1247 value: u32,
1248 }
1249
1250 unsafe impl Plain for MyStruct {}
1251
1252 bump_rlimit_mlock();
1253
1254 let mut obj = get_test_object("user_ringbuf.bpf.o");
1255 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1256 let _link = prog.attach().expect("failed to attach prog");
1257 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1258 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1259 let mut urb_sample = user_ringbuf
1260 .reserve(size_of::<MyStruct>())
1261 .expect("failed to reserve space");
1262 let bytes = urb_sample.as_mut();
1263 let my_struct = plain::from_mut_bytes::<MyStruct>(bytes).expect("failed to convert bytes");
1264 my_struct.key = 42;
1265 my_struct.value = 1337;
1266 user_ringbuf
1267 .submit(urb_sample)
1268 .expect("failed to submit sample");
1269
1270 // Trigger BPF program.
1271 let _pid = unsafe { libc::getpid() };
1272
1273 // At this point, the BPF program should have run and consumed the sample in
1274 // the user ring buffer, and stored the key/value in the samples map.
1275 let samples_map = get_map_mut(&mut obj, "samples");
1276 let key: u32 = 42;
1277 let value: u32 = 1337;
1278 let res = samples_map
1279 .lookup(&key.to_ne_bytes(), MapFlags::ANY)
1280 .expect("failed to lookup")
1281 .expect("failed to find value for key");
1282
1283 // The value in the samples map should be the same as the value we submitted
1284 assert_eq!(res.len(), size_of::<u32>());
1285 let mut array = [0; size_of::<u32>()];
1286 array.copy_from_slice(&res[..]);
1287 assert_eq!(u32::from_ne_bytes(array), value);
1288 }
1289
1290 #[tag(root)]
1291 #[test]
1292 fn test_object_user_ringbuf_reservation_too_big() {
1293 bump_rlimit_mlock();
1294
1295 let mut obj = get_test_object("user_ringbuf.bpf.o");
1296 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1297 let _link = prog.attach().expect("failed to attach prog");
1298 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1299 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1300 let err = user_ringbuf.reserve(1024 * 1024).unwrap_err();
1301 assert!(
1302 err.to_string().contains("requested size is too large"),
1303 "{err:#}"
1304 );
1305 }
1306
1307 #[tag(root)]
1308 #[test]
1309 fn test_object_user_ringbuf_not_enough_space() {
1310 bump_rlimit_mlock();
1311
1312 let mut obj = get_test_object("user_ringbuf.bpf.o");
1313 let mut prog = get_prog_mut(&mut obj, "handle__sys_enter_getpid");
1314 let _link = prog.attach().expect("failed to attach prog");
1315 let urb_map = get_map_mut(&mut obj, "user_ringbuf");
1316 let user_ringbuf = UserRingBuffer::new(&urb_map).expect("failed to create user ringbuf");
1317 let _ = user_ringbuf
1318 .reserve(1024 * 3)
1319 .expect("failed to reserve space");
1320 let err = user_ringbuf.reserve(1024 * 3).unwrap_err();
1321 assert!(
1322 err.to_string()
1323 .contains("not enough space in the ring buffer"),
1324 "{err:#}"
1325 );
1326 }
1327
1328 #[tag(root)]
1329 #[test]
1330 fn test_object_task_iter() {
1331 bump_rlimit_mlock();
1332
1333 let mut obj = get_test_object("taskiter.bpf.o");
1334 let mut prog = get_prog_mut(&mut obj, "dump_pid");
1335 let link = prog.attach().expect("failed to attach prog");
1336 let mut iter = Iter::new(&link).expect("failed to create iterator");
1337
1338 #[repr(C)]
1339 #[derive(Clone, Copy)]
1340 struct IndexPidPair {
1341 i: u32,
1342 pid: i32,
1343 }
1344
1345 unsafe impl Plain for IndexPidPair {}
1346
1347 let mut buf = Vec::new();
1348 let bytes_read = iter
1349 .read_to_end(&mut buf)
1350 .expect("failed to read from iterator");
1351
1352 assert!(bytes_read > 0);
1353 assert_eq!(bytes_read % size_of::<IndexPidPair>(), 0);
1354 let items: &[IndexPidPair] =
1355 plain::slice_from_bytes(buf.as_slice()).expect("Input slice cannot satisfy length");
1356
1357 assert!(!items.is_empty());
1358 assert_eq!(items[0].i, 0);
1359 assert!(items.windows(2).all(|w| w[0].i + 1 == w[1].i));
1360 // Check that the init process (PID 1) is among the results
1361 assert!(items.iter().any(|&item| item.pid == 1));
1362 }
1363
1364 #[tag(root)]
1365 #[test]
1366 fn test_object_map_iter() {
1367 bump_rlimit_mlock();
1368
1369 // Create a map for iteration test.
1370 let opts = libbpf_sys::bpf_map_create_opts {
1371 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1372 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1373 ..Default::default()
1374 };
1375 let map = MapHandle::create(
1376 MapType::Hash,
1377 Some("mymap_test_object_map_iter"),
1378 4,
1379 8,
1380 8,
1381 &opts,
1382 )
1383 .expect("failed to create map");
1384
1385 // Insert 3 elements.
1386 for i in 0..3 {
1387 let key = i32::to_ne_bytes(i);
1388 // Build the 8-byte value from the 4-byte key plus zero padding, so the test stays correct if the range of i is enlarged for a more thorough run.
1389 let val = [&key[..], &[0_u8; 4]].concat();
1390 map.update(&key, val.as_slice(), MapFlags::empty())
1391 .expect("failed to write");
1392 }
1393
1394 let mut obj = get_test_object("mapiter.bpf.o");
1395 let mut prog = get_prog_mut(&mut obj, "map_iter");
1396 let link = prog
1397 .attach_iter(map.as_fd())
1398 .expect("failed to attach map iter prog");
1399 let mut iter = Iter::new(&link).expect("failed to create map iterator");
1400
1401 let mut buf = Vec::new();
1402 let bytes_read = iter
1403 .read_to_end(&mut buf)
1404 .expect("failed to read from iterator");
1405
1406 assert!(bytes_read > 0);
1407 assert_eq!(bytes_read % size_of::<u32>(), 0);
1408 // Convert buf to &[u32]
1409 let buf =
1410 plain::slice_from_bytes::<u32>(buf.as_slice()).expect("Input slice cannot satisfy length");
1411 assert!(buf.contains(&0));
1412 assert!(buf.contains(&1));
1413 assert!(buf.contains(&2));
1414 }
1415
1416 #[tag(root)]
1417 #[test]
1418 fn test_object_map_create_and_pin() {
1419 bump_rlimit_mlock();
1420
1421 let opts = libbpf_sys::bpf_map_create_opts {
1422 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1423 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1424 ..Default::default()
1425 };
1426
1427 let mut map = MapHandle::create(
1428 MapType::Hash,
1429 Some("mymap_test_object_map_create_and_pin"),
1430 4,
1431 8,
1432 8,
1433 &opts,
1434 )
1435 .expect("failed to create map");
1436
1437 assert_eq!(map.name(), "mymap_test_object_map_create_and_pin");
1438
1439 let key = vec![1, 2, 3, 4];
1440 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
1441 map.update(&key, &val, MapFlags::empty())
1442 .expect("failed to write");
1443 let res = map
1444 .lookup(&key, MapFlags::ANY)
1445 .expect("failed to lookup")
1446 .expect("failed to find value for key");
1447 assert_eq!(val, res);
1448
1449 let path = "/sys/fs/bpf/mymap_test_object_map_create_and_pin";
1450
1451 // Unpinning an unpinned map should be an error
1452 assert!(map.unpin(path).is_err());
1453 assert!(!Path::new(path).exists());
1454
1455 // Pin and unpin should be successful
1456 map.pin(path).expect("failed to pin map");
1457 assert!(Path::new(path).exists());
1458 map.unpin(path).expect("failed to unpin map");
1459 assert!(!Path::new(path).exists());
1460 }
1461
1462 #[tag(root)]
1463 #[test]
1464 fn test_object_map_create_without_name() {
1465 bump_rlimit_mlock();
1466
1467 #[allow(clippy::needless_update)]
1468 let opts = libbpf_sys::bpf_map_create_opts {
1469 sz: size_of::<libbpf_sys::bpf_map_create_opts>() as libbpf_sys::size_t,
1470 map_flags: libbpf_sys::BPF_F_NO_PREALLOC,
1471 btf_fd: 0,
1472 btf_key_type_id: 0,
1473 btf_value_type_id: 0,
1474 btf_vmlinux_value_type_id: 0,
1475 inner_map_fd: 0,
1476 map_extra: 0,
1477 numa_node: 0,
1478 map_ifindex: 0,
1479 // bpf_map_create_opts might have padding fields on some platforms
1480 ..Default::default()
1481 };
1482
1483 let map = MapHandle::create(MapType::Hash, Option::<&str>::None, 4, 8, 8, &opts)
1484 .expect("failed to create map");
1485
1486 assert!(map.name().is_empty());
1487
1488 let key = vec![1, 2, 3, 4];
1489 let val = vec![1, 2, 3, 4, 5, 6, 7, 8];
1490 map.update(&key, &val, MapFlags::empty())
1491 .expect("failed to write");
1492 let res = map
1493 .lookup(&key, MapFlags::ANY)
1494 .expect("failed to lookup")
1495 .expect("failed to find value for key");
1496 assert_eq!(val, res);
1497 }
1498
1499 /// Test whether we can obtain multiple `MapHandle`s from a `Map`.
1500 #[tag(root)]
1501 #[test]
1502 fn test_object_map_handle_clone() {
1503 bump_rlimit_mlock();
1504
1505 let mut obj = get_test_object("runqslower.bpf.o");
1506 let map = get_map_mut(&mut obj, "events");
1507 let handle1 = MapHandle::try_from(&map).expect("failed to create handle from Map");
1508 assert_eq!(map.name(), handle1.name());
1509 assert_eq!(map.map_type(), handle1.map_type());
1510 assert_eq!(map.key_size(), handle1.key_size());
1511 assert_eq!(map.value_size(), handle1.value_size());
1512
1513 let handle2 = MapHandle::try_from(&handle1).expect("failed to duplicate existing handle");
1514 assert_eq!(handle1.name(), handle2.name());
1515 assert_eq!(handle1.map_type(), handle2.map_type());
1516 assert_eq!(handle1.key_size(), handle2.key_size());
1517 assert_eq!(handle1.value_size(), handle2.value_size());
1518
1519 let info1 = map.info().expect("failed to get map info from map");
1520 let info2 = handle2.info().expect("failed to get map info from handle");
1521 assert_eq!(
1522 info1.info.id, info2.info.id,
1523 "Map and MapHandle have different IDs"
1524 );
1525 }
1526
1527 #[tag(root)]
1528 #[test]
1529 fn test_object_usdt() {
1530 bump_rlimit_mlock();
1531
1532 let mut obj = get_test_object("usdt.bpf.o");
1533 let mut prog = get_prog_mut(&mut obj, "handle__usdt");
1534
1535 let path = current_exe().expect("failed to find executable name");
1536 let _link = prog
1537 .attach_usdt(
1538 unsafe { libc::getpid() },
1539 &path,
1540 "test_provider",
1541 "test_function",
1542 )
1543 .expect("failed to attach prog");
1544
1545 let map = get_map_mut(&mut obj, "ringbuf");
1546 let action = || {
1547 // Define a USDT probe point and exercise it as we are attaching to self.
1548 probe!(test_provider, test_function, 1);
1549 };
1550 let result = with_ringbuffer(&map, action);
1551
1552 assert_eq!(result, 1);
1553 }
1554
1555 #[tag(root)]
1556 #[test]
1557 fn test_object_usdt_cookie() {
1558 bump_rlimit_mlock();
1559
1560 let cookie_val = 1337u16;
1561 let mut obj = get_test_object("usdt.bpf.o");
1562 let mut prog = get_prog_mut(&mut obj, "handle__usdt_with_cookie");
1563
1564 let path = current_exe().expect("failed to find executable name");
1565 let _link = prog
1566 .attach_usdt_with_opts(
1567 unsafe { libc::getpid() },
1568 &path,
1569 "test_provider",
1570 "test_function2",
1571 UsdtOpts {
1572 cookie: cookie_val.into(),
1573 ..UsdtOpts::default()
1574 },
1575 )
1576 .expect("failed to attach prog");
1577
1578 let map = get_map_mut(&mut obj, "ringbuf");
1579 let action = || {
1580 // Define a USDT probe point and exercise it as we are attaching to self.
1581 probe!(test_provider, test_function2, 1);
1582 };
1583 let result = with_ringbuffer(&map, action);
1584
1585 assert_eq!(result, cookie_val.into());
1586 }
1587
1588 #[tag(root)]
1589 #[test]
1590 fn test_map_probes() {
1591 bump_rlimit_mlock();
1592
1593 let supported = MapType::Array
1594 .is_supported()
1595 .expect("failed to query if Array map is supported");
1596 assert!(supported);
1597 let supported_res = MapType::Unknown.is_supported();
1598 assert!(supported_res.is_err());
1599 }
1600
1601 #[tag(root)]
1602 #[test]
1603 fn test_program_probes() {
1604 bump_rlimit_mlock();
1605
1606 let supported = ProgramType::SocketFilter
1607 .is_supported()
1608 .expect("failed to query if SocketFilter program is supported");
1609 assert!(supported);
1610 let supported_res = ProgramType::Unknown.is_supported();
1611 assert!(supported_res.is_err());
1612 }
1613
1614 #[tag(root)]
1615 #[test]
1616 fn test_program_helper_probes() {
1617 bump_rlimit_mlock();
1618
1619 let supported = ProgramType::SocketFilter
1620 .is_helper_supported(libbpf_sys::BPF_FUNC_map_lookup_elem)
1621 .expect("failed to query if helper supported");
1622 assert!(supported);
1623 // redirect should not be supported from socket filter, as it is only used in TC/XDP.
1624 let supported = ProgramType::SocketFilter
1625 .is_helper_supported(libbpf_sys::BPF_FUNC_redirect)
1626 .expect("failed to query if helper supported");
1627 assert!(!supported);
1628 let supported_res = MapType::Unknown.is_supported();
1629 assert!(supported_res.is_err());
1630 }
1631
1632 #[tag(root)]
1633 #[test]
1634 fn test_object_open_program_insns() {
1635 bump_rlimit_mlock();
1636
1637 let open_obj = open_test_object("usdt.bpf.o");
1638 let prog = open_obj
1639 .progs()
1640 .find(|prog| prog.name() == OsStr::new("handle__usdt"))
1641 .expect("failed to find program");
1642
1643 let insns = prog.insns();
1644 assert!(!insns.is_empty());
1645 }
1646
1647 #[tag(root)]
1648 #[test]
1649 fn test_object_program_insns() {
1650 bump_rlimit_mlock();
1651
1652 let mut obj = get_test_object("usdt.bpf.o");
1653 let prog = get_prog_mut(&mut obj, "handle__usdt");
1654 let insns = prog.insns();
1655 assert!(!insns.is_empty());
1656 }
1657
1658 /// Check that we can attach a BPF program to a kernel tracepoint.
1659 #[tag(root)]
1660 #[test]
1661 fn test_object_tracepoint() {
1662 bump_rlimit_mlock();
1663
1664 let mut obj = get_test_object("tracepoint.bpf.o");
1665 let mut prog = get_prog_mut(&mut obj, "handle__tracepoint");
1666 let _link = prog
1667 .attach_tracepoint("syscalls", "sys_enter_getpid")
1668 .expect("failed to attach prog");
1669
1670 let map = get_map_mut(&mut obj, "ringbuf");
1671 let action = || {
1672 let _pid = unsafe { libc::getpid() };
1673 };
1674 let result = with_ringbuffer(&map, action);
1675
1676 assert_eq!(result, 1);
1677 }
1678
1679 /// Check that we can attach a BPF program to a kernel tracepoint, providing
1680 /// additional options.
1681 #[tag(root)]
1682 #[test]
1683 fn test_object_tracepoint_with_opts() {
1684 bump_rlimit_mlock();
1685
1686 let cookie_val = 42u16;
1687 let mut obj = get_test_object("tracepoint.bpf.o");
1688 let mut prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie");
1689
1690 let opts = TracepointOpts {
1691 cookie: cookie_val.into(),
1692 ..TracepointOpts::default()
1693 };
1694 let _link = prog
1695 .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
1696 .expect("failed to attach prog");
1697
1698 let map = get_map_mut(&mut obj, "ringbuf");
1699 let action = || {
1700 let _pid = unsafe { libc::getpid() };
1701 };
1702 let result = with_ringbuffer(&map, action);
1703
1704 assert_eq!(result, cookie_val.into());
1705 }
1706
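// Target function for the uprobe tests below: it must stay out-of-line and
// keep an unmangled symbol so that `attach_uprobe_with_opts` can resolve it by
// `func_name` in the current executable.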
1707 #[inline(never)]
1708 #[no_mangle]
1709 extern "C" fn uprobe_target() -> usize {
1710 // Use `black_box` here as an additional barrier to inlining.
1711 hint::black_box(42)
1712 }
1713
1714 /// Check that we can attach a BPF program to a uprobe.
1715 #[tag(root)]
1716 #[test]
1717 fn test_object_uprobe_with_opts() {
1718 bump_rlimit_mlock();
1719
1720 let mut obj = get_test_object("uprobe.bpf.o");
1721 let mut prog = get_prog_mut(&mut obj, "handle__uprobe");
1722
1723 let pid = unsafe { libc::getpid() };
1724 let path = current_exe().expect("failed to find executable name");
1725 let func_offset = 0;
1726 let opts = UprobeOpts {
1727 func_name: "uprobe_target".to_string(),
1728 ..Default::default()
1729 };
1730 let _link = prog
1731 .attach_uprobe_with_opts(pid, path, func_offset, opts)
1732 .expect("failed to attach prog");
1733
1734 let map = get_map_mut(&mut obj, "ringbuf");
1735 let action = || {
1736 let _ = uprobe_target();
1737 };
1738 let result = with_ringbuffer(&map, action);
1739
1740 assert_eq!(result, 1);
1741 }
1742
1743 /// Check that we can attach a BPF program to a uprobe and access the cookie
1744 /// provided during attach.
1745 #[tag(root)]
1746 #[test]
1747 fn test_object_uprobe_with_cookie() {
1748 bump_rlimit_mlock();
1749
1750 let cookie_val = 5u16;
1751 let mut obj = get_test_object("uprobe.bpf.o");
1752 let mut prog = get_prog_mut(&mut obj, "handle__uprobe_with_cookie");
1753
1754 let pid = unsafe { libc::getpid() };
1755 let path = current_exe().expect("failed to find executable name");
1756 let func_offset = 0;
1757 let opts = UprobeOpts {
1758 func_name: "uprobe_target".to_string(),
1759 cookie: cookie_val.into(),
1760 ..Default::default()
1761 };
1762 let _link = prog
1763 .attach_uprobe_with_opts(pid, path, func_offset, opts)
1764 .expect("failed to attach prog");
1765
1766 let map = get_map_mut(&mut obj, "ringbuf");
1767 let action = || {
1768 let _ = uprobe_target();
1769 };
1770 let result = with_ringbuffer(&map, action);
1771
1772 assert_eq!(result, cookie_val.into());
1773 }
1774
1775 /// Check that we can link multiple object files.
1776 #[test]
1777 fn test_object_link_files() {
1778 fn test(files: Vec<PathBuf>) {
1779 let output_file = NamedTempFile::new().unwrap();
1780
1781 let mut linker = Linker::new(output_file.path()).unwrap();
1782 let () = files
1783 .into_iter()
1784 .try_for_each(|file| linker.add_file(file))
1785 .unwrap();
1786 let () = linker.link().unwrap();
1787
1788 // Check that we can load the resulting object file.
1789 let _object = ObjectBuilder::default()
1790 .debug(true)
1791 .open_file(output_file.path())
1792 .unwrap();
1793 }
1794
1795 let obj_path1 = get_test_object_path("usdt.bpf.o");
1796 let obj_path2 = get_test_object_path("ringbuf.bpf.o");
1797
1798 test(vec![obj_path1.clone()]);
1799 test(vec![obj_path1, obj_path2]);
1800 }
1801
1802 /// Get access to the underlying per-cpu ring buffer data.
1803 fn buffer<'a>(perf: &'a libbpf_rs::PerfBuffer, buf_idx: usize) -> &'a [u8] {
1804 let perf_buff_ptr = perf.as_libbpf_object();
1805 let mut buffer_data_ptr: *mut c_void = ptr::null_mut();
1806 let mut buffer_size: usize = 0;
1807 let ret = unsafe {
1808 libbpf_sys::perf_buffer__buffer(
1809 perf_buff_ptr.as_ptr(),
1810 buf_idx as i32,
1811 ptr::addr_of_mut!(buffer_data_ptr),
1812 ptr::addr_of_mut!(buffer_size) as *mut libbpf_sys::size_t,
1813 )
1814 };
1815 assert!(ret >= 0);
1816 unsafe { slice::from_raw_parts(buffer_data_ptr as *const u8, buffer_size) }
1817 }
1818
1819 /// Check that we can see the raw ring buffer of the perf buffer and find a
1820 /// value we have sent.
1821 #[tag(root)]
1822 #[test]
1823 fn test_object_perf_buffer_raw() {
1824 use memmem::Searcher;
1825 use memmem::TwoWaySearcher;
1826
1827 bump_rlimit_mlock();
1828
1829 let cookie_val = 42u16;
1830 let mut obj = get_test_object("tracepoint.bpf.o");
1831 let mut prog = get_prog_mut(&mut obj, "handle__tracepoint_with_cookie_pb");
1832
1833 let opts = TracepointOpts {
1834 cookie: cookie_val.into(),
1835 ..TracepointOpts::default()
1836 };
1837 let _link = prog
1838 .attach_tracepoint_with_opts("syscalls", "sys_enter_getpid", opts)
1839 .expect("failed to attach prog");
1840
1841 let map = get_map_mut(&mut obj, "pb");
1842 let cookie_bytes = cookie_val.to_ne_bytes();
1843 let searcher = TwoWaySearcher::new(&cookie_bytes[..]);
1844
1845 let perf = libbpf_rs::PerfBufferBuilder::new(&map)
1846 .build()
1847 .expect("failed to build");
1848
1849 // Make an action that the tracepoint will see
1850 let _pid = unsafe { libc::getpid() };
1851
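// The event lands in the per-CPU buffer of whichever CPU handled the
// tracepoint, so search every buffer for the cookie bytes.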
1852 let found_cookie = (0..perf.buffer_cnt()).any(|buf_idx| {
1853 let buf = buffer(&perf, buf_idx);
1854 searcher.search_in(buf).is_some()
1855 });
1856
1857 assert!(found_cookie);
1858 }
1859
1860 /// Check that we can get map pin status and map pin path
1861 #[tag(root)]
1862 #[test]
1863 fn test_map_pinned_status() {
1864 bump_rlimit_mlock();
1865
1866 let mut obj = get_test_object("map_auto_pin.bpf.o");
1867 let map = get_map_mut(&mut obj, "auto_pin_map");
1868 let is_pinned = map.is_pinned();
1869 assert!(is_pinned);
1870 let expected_path = "/sys/fs/bpf/auto_pin_map";
1871 let get_path = map.get_pin_path().expect("get map pin path failed");
1872 assert_eq!(expected_path, get_path.to_str().unwrap());
1873 // cleanup
1874 let _ = fs::remove_file(expected_path);
1875 }
1876
1877 /// Change the root_pin_path and see if it works.
1878 #[tag(root)]
1879 #[test]
1880 fn test_map_pinned_status_with_pin_root_path() {
1881 bump_rlimit_mlock();
1882
1883 let obj_path = get_test_object_path("map_auto_pin.bpf.o");
1884 let mut obj = ObjectBuilder::default()
1885 .debug(true)
1886 .pin_root_path("/sys/fs/bpf/test_namespace")
1887 .expect("root_pin_path failed")
1888 .open_file(obj_path)
1889 .expect("failed to open object")
1890 .load()
1891 .expect("failed to load object");
1892
1893 let map = get_map_mut(&mut obj, "auto_pin_map");
1894 let is_pinned = map.is_pinned();
1895 assert!(is_pinned);
1896 let expected_path = "/sys/fs/bpf/test_namespace/auto_pin_map";
1897 let get_path = map.get_pin_path().expect("get map pin path failed");
1898 assert_eq!(expected_path, get_path.to_str().unwrap());
1899 // cleanup
1900 let _ = fs::remove_file(expected_path);
1901 let _ = fs::remove_dir("/sys/fs/bpf/test_namespace");
1902 }
1903
1904 /// Check that we can get program fd by id and vice versa.
1905 #[tag(root)]
1906 #[test]
1907 fn test_program_get_fd_and_id() {
1908 bump_rlimit_mlock();
1909
1910 let mut obj = get_test_object("runqslower.bpf.o");
1911 let prog = get_prog_mut(&mut obj, "handle__sched_wakeup");
1912 let prog_fd = prog.as_fd();
1913 let prog_id = Program::get_id_by_fd(prog_fd).expect("failed to get program id by fd");
1914 let _owned_prog_fd = Program::get_fd_by_id(prog_id).expect("failed to get program fd by id");
1915 }
1916
1917 /// Check that maps with autocreate disabled don't prevent object loading
1918 #[tag(root)]
1919 #[test]
1920 fn test_map_autocreate_disable() {
1921 bump_rlimit_mlock();
1922
1923 let mut open_obj = open_test_object("map_auto_pin.bpf.o");
1924 let mut auto_pin_map = open_obj
1925 .maps_mut()
1926 .find(|map| map.name() == OsStr::new("auto_pin_map"))
1927 .expect("failed to find `auto_pin_map` map");
1928 auto_pin_map
1929 .set_autocreate(false)
1930 .expect("set_autocreate() failed");
1931
1932 open_obj.load().expect("failed to load object");
1933 }
1934
1935 /// Check that we can resize a map.
1936 #[tag(root)]
1937 #[test]
1938 fn test_map_resize() {
1939 bump_rlimit_mlock();
1940
1941 let mut open_obj = open_test_object("map_auto_pin.bpf.o");
1942 let mut resizable = open_obj
1943 .maps_mut()
1944 .find(|map| map.name() == OsStr::new(".data.resizable_data"))
1945 .expect("failed to find `.data.resizable_data` map");
1946
1947 let len = resizable.initial_value().unwrap().len();
1948 assert_eq!(len, size_of::<u64>());
1949
1950 let () = resizable
1951 .set_value_size(len as u32 * 2)
1952 .expect("failed to set value size");
1953 let new_len = resizable.initial_value().unwrap().len();
1954 assert_eq!(new_len, len * 2);
1955 }
1956
1957 /// Check that we are able to attach using ksyscall
1958 #[tag(root)]
1959 #[test]
1960 fn test_attach_ksyscall() {
1961 bump_rlimit_mlock();
1962
1963 let mut obj = get_test_object("ksyscall.bpf.o");
1964 let mut prog = get_prog_mut(&mut obj, "handle__ksyscall");
1965 let _link = prog
1966 .attach_ksyscall(false, "kill")
1967 .expect("failed to attach prog");
1968
1969 let map = get_map_mut(&mut obj, "ringbuf");
1970 let action = || {
1971 // Send `SIGCHLD`, which is ignored by default, to our process.
1972 let ret = unsafe { libc::kill(libc::getpid(), libc::SIGCHLD) };
1973 if ret < 0 {
1974 panic!("kill failed: {}", io::Error::last_os_error());
1975 }
1976 };
1977 let result = with_ringbuffer(&map, action);
1978
1979 assert_eq!(result, 1);
1980 }
1981
1982 /// Check that we can invoke a program directly.
1983 #[tag(root)]
1984 #[test]
1985 fn test_run_prog_success() {
1986 bump_rlimit_mlock();
1987
1988 let mut obj = get_test_object("run_prog.bpf.o");
1989 let mut prog = get_prog_mut(&mut obj, "test_1");
1990
1991 #[repr(C)]
1992 struct bpf_dummy_ops_state {
1993 val: c_int,
1994 }
1995
1996 let value = 42;
1997 let state = bpf_dummy_ops_state { val: value };
1998 let mut args = [addr_of!(state) as u64];
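// For this struct_ops-style dummy program, `test_run` receives the BPF
// function's arguments through `context_in` as an array of u64 values; the
// single argument here is a pointer to the state above (an assumption based on
// how the kernel's bpf_dummy_ops test infrastructure interprets the context).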
1999 let input = ProgramInput {
2000 context_in: Some(unsafe {
2001 slice::from_raw_parts_mut(&mut args as *mut _ as *mut u8, size_of_val(&args))
2002 }),
2003 ..Default::default()
2004 };
2005 let output = prog.test_run(input).unwrap();
2006 assert_eq!(output.return_value, value as _);
2007 }
2008
2009 /// Check that we fail program invocation when providing insufficient arguments.
2010 #[tag(root)]
2011 #[test]
2012 fn test_run_prog_fail() {
2013 bump_rlimit_mlock();
2014
2015 let mut obj = get_test_object("run_prog.bpf.o");
2016 let mut prog = get_prog_mut(&mut obj, "test_2");
2017
2018 let input = ProgramInput::default();
2019 let _err = prog.test_run(input).unwrap_err();
2020 }
2021