use core::ffi::c_void;
use std::ffi::CStr;
use std::ffi::CString;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::fmt::Debug;
use std::fs::remove_file;
use std::io;
use std::marker::PhantomData;
use std::mem;
use std::mem::transmute;
use std::ops::Deref;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsFd;
use std::os::unix::io::AsRawFd;
use std::os::unix::io::BorrowedFd;
use std::os::unix::io::FromRawFd;
use std::os::unix::io::OwnedFd;
use std::os::unix::io::RawFd;
use std::path::Path;
use std::ptr;
use std::ptr::NonNull;
use std::slice;
use std::slice::from_raw_parts;

use bitflags::bitflags;
use libbpf_sys::bpf_map_info;
use libbpf_sys::bpf_obj_get_info_by_fd;

use crate::util;
use crate::util::parse_ret_i32;
use crate::util::validate_bpf_ret;
use crate::AsRawLibbpf;
use crate::Error;
use crate::ErrorExt as _;
use crate::Link;
use crate::Mut;
use crate::Result;

/// An immutable parsed but not yet loaded BPF map.
pub type OpenMap<'obj> = OpenMapImpl<'obj>;
/// A mutable parsed but not yet loaded BPF map.
pub type OpenMapMut<'obj> = OpenMapImpl<'obj, Mut>;

/// Represents a parsed but not yet loaded BPF map.
///
/// This object exposes operations that need to happen before the map is created.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
#[repr(transparent)]
pub struct OpenMapImpl<'obj, T = ()> {
    ptr: NonNull<libbpf_sys::bpf_map>,
    _phantom: PhantomData<&'obj T>,
}

// TODO: Document members.
#[allow(missing_docs)]
impl<'obj> OpenMap<'obj> {
    /// Create a new [`OpenMap`] from a pointer to a `libbpf_sys::bpf_map`.
    pub fn new(object: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        Self {
            ptr: unsafe { NonNull::new_unchecked(object as *const _ as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the [`OpenMap`]'s name.
    pub fn name(&self) -> &OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        //          NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    /// Retrieve the type of the map.
    pub fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    fn initial_value_raw(&self) -> (*mut u8, usize) {
        let mut size = 0u64;
        let ptr = unsafe {
            libbpf_sys::bpf_map__initial_value(self.ptr.as_ptr(), &mut size as *mut _ as _)
        };
        (ptr.cast(), size as _)
    }

    /// Retrieve the initial value of the map.
    pub fn initial_value(&self) -> Option<&[u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            let data = unsafe { slice::from_raw_parts(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }
}

impl<'obj> OpenMapMut<'obj> {
    /// Create a new [`OpenMapMut`] from a pointer to a `libbpf_sys::bpf_map`.
    pub fn new_mut(object: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        Self {
            ptr: unsafe { NonNull::new_unchecked(object as *mut _) },
            _phantom: PhantomData,
        }
    }

    /// Retrieve the initial value of the map as a mutable slice.
    pub fn initial_value_mut(&mut self) -> Option<&mut [u8]> {
        let (ptr, size) = self.initial_value_raw();
        if ptr.is_null() {
            None
        } else {
            let data = unsafe { slice::from_raw_parts_mut(ptr.cast::<u8>(), size) };
            Some(data)
        }
    }

    /// Set the network interface index to associate the map with (used for offload).
    pub fn set_map_ifindex(&mut self, idx: u32) {
        unsafe { libbpf_sys::bpf_map__set_ifindex(self.ptr.as_ptr(), idx) };
    }

    /// Set the map's initial value.
    pub fn set_initial_value(&mut self, data: &[u8]) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::bpf_map__set_initial_value(
                self.ptr.as_ptr(),
                data.as_ptr() as *const c_void,
                data.len() as libbpf_sys::size_t,
            )
        };

        util::parse_ret(ret)
    }

    /// Set the map's type.
    pub fn set_type(&mut self, ty: MapType) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_type(self.ptr.as_ptr(), ty as u32) };
        util::parse_ret(ret)
    }

    /// Set the size of the map's keys, in bytes.
    pub fn set_key_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_key_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the size of the map's values, in bytes.
    pub fn set_value_size(&mut self, size: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_value_size(self.ptr.as_ptr(), size) };
        util::parse_ret(ret)
    }

    /// Set the maximum number of entries the map can hold.
    pub fn set_max_entries(&mut self, count: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_max_entries(self.ptr.as_ptr(), count) };
        util::parse_ret(ret)
    }

    /// Set the map creation flags.
    pub fn set_map_flags(&mut self, flags: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_flags(self.ptr.as_ptr(), flags) };
        util::parse_ret(ret)
    }

    /// Set the NUMA node the map should be allocated on.
    pub fn set_numa_node(&mut self, numa_node: u32) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_numa_node(self.ptr.as_ptr(), numa_node) };
        util::parse_ret(ret)
    }

    /// Set the file descriptor of the inner map (for map-in-map types).
    pub fn set_inner_map_fd(&mut self, inner_map_fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe {
            libbpf_sys::bpf_map__set_inner_map_fd(self.ptr.as_ptr(), inner_map_fd.as_raw_fd())
        };
        util::parse_ret(ret)
    }

    /// Set the map's `map_extra` value (its meaning depends on the map type).
    pub fn set_map_extra(&mut self, map_extra: u64) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_map_extra(self.ptr.as_ptr(), map_extra) };
        util::parse_ret(ret)
    }

    /// Set whether libbpf should automatically create this map during object load.
    pub fn set_autocreate(&mut self, autocreate: bool) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__set_autocreate(self.ptr.as_ptr(), autocreate) };
        util::parse_ret(ret)
    }

    /// Set the path the map should be pinned to.
    pub fn set_pin_path<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__set_pin_path(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Reuse an existing file descriptor for this BPF map.
    pub fn reuse_fd(&mut self, fd: BorrowedFd<'_>) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map__reuse_fd(self.ptr.as_ptr(), fd.as_raw_fd()) };
        util::parse_ret(ret)
    }

    /// Reuse an already-pinned map for `self`.
    pub fn reuse_pinned_map<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let cstring = util::path_to_cstring(path)?;

        let fd = unsafe { libbpf_sys::bpf_obj_get(cstring.as_ptr()) };
        if fd < 0 {
            return Err(Error::from(io::Error::last_os_error()));
        }

        let fd = unsafe { OwnedFd::from_raw_fd(fd) };

        self.reuse_fd(fd.as_fd())
    }
}

impl<'obj> Deref for OpenMapMut<'obj> {
    type Target = OpenMap<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `OpenMapImpl` is `repr(transparent)` and so the in-memory
        //         representation of both types is the same.
        unsafe { transmute::<&OpenMapMut<'obj>, &OpenMap<'obj>>(self) }
    }
}

impl<T> AsRawLibbpf for OpenMapImpl<'_, T> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}

pub(crate) fn map_fd(map: NonNull<libbpf_sys::bpf_map>) -> Option<RawFd> {
    let fd = unsafe { libbpf_sys::bpf_map__fd(map.as_ptr()) };
    util::parse_ret_i32(fd).ok().map(|fd| fd as RawFd)
}

/// Return the size of one value including padding for interacting with per-cpu
/// maps. The values are aligned to 8 bytes.
fn percpu_aligned_value_size<M>(map: &M) -> usize
where
    M: MapCore + ?Sized,
{
    let val_size = map.value_size() as usize;
    util::roundup(val_size, 8)
}

/// Returns the size of the buffer needed for a lookup/update of a per-cpu map.
fn percpu_buffer_size<M>(map: &M) -> Result<usize>
where
    M: MapCore + ?Sized,
{
    let aligned_val_size = percpu_aligned_value_size(map);
    let ncpu = crate::num_possible_cpus()?;
    Ok(ncpu * aligned_val_size)
}

/// Return a null pointer instead of the key for queue/stack/bloom-filter maps,
/// which are keyless; the BPF functions supporting these map types expect a
/// NULL key.
fn map_key<M>(map: &M, key: &[u8]) -> *const c_void
where
    M: MapCore + ?Sized,
{
    // For all the keyless maps we null out the key per documentation of libbpf.
    if map.key_size() == 0 && map.map_type().is_keyless() {
        return ptr::null();
    }

    key.as_ptr() as *const c_void
}

/// Internal function to return a value from a map into a buffer of the given size.
fn lookup_raw<M>(map: &M, key: &[u8], flags: MapFlags, out_size: usize) -> Result<Option<Vec<u8>>>
where
    M: MapCore + ?Sized,
{
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    let mut out: Vec<u8> = Vec::with_capacity(out_size);

    let ret = unsafe {
        libbpf_sys::bpf_map_lookup_elem_flags(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            out.as_mut_ptr() as *mut c_void,
            flags.bits(),
        )
    };

    if ret == 0 {
        unsafe {
            out.set_len(out_size);
        }
        Ok(Some(out))
    } else {
        let err = io::Error::last_os_error();
        if err.kind() == io::ErrorKind::NotFound {
            Ok(None)
        } else {
            Err(Error::from(err))
        }
    }
}

/// Internal function to update a map. This does not check the length of the
/// supplied value.
fn update_raw<M>(map: &M, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()>
where
    M: MapCore + ?Sized,
{
    if key.len() != map.key_size() as usize {
        return Err(Error::with_invalid_data(format!(
            "key_size {} != {}",
            key.len(),
            map.key_size()
        )));
    };

    let ret = unsafe {
        libbpf_sys::bpf_map_update_elem(
            map.as_fd().as_raw_fd(),
            map_key(map, key),
            value.as_ptr() as *const c_void,
            flags.bits(),
        )
    };

    util::parse_ret(ret)
}

#[allow(clippy::wildcard_imports)]
mod private {
    use super::*;

    pub trait Sealed {}

    impl<T> Sealed for MapImpl<'_, T> {}
    impl Sealed for MapHandle {}
}

/// A trait representing core functionality common to fully initialized maps.
pub trait MapCore: Debug + AsFd + private::Sealed {
    /// Retrieve the map's name.
    fn name(&self) -> &OsStr;

    /// Retrieve the type of the map.
    fn map_type(&self) -> MapType;

    /// Retrieve the size of the map's keys.
    fn key_size(&self) -> u32;

    /// Retrieve the size of the map's values.
    fn value_size(&self) -> u32;

    /// Fetch extra map information.
    #[inline]
    fn info(&self) -> Result<MapInfo> {
        MapInfo::new(self.as_fd())
    }

    /// Returns an iterator over keys in this map.
    ///
    /// Note that if the map is not stable (stable meaning no updates or deletes) during iteration,
    /// iteration can skip keys, restart from the beginning, or duplicate keys. In other words,
    /// iteration becomes unpredictable.
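    ///
    /// A minimal sketch of iterating over all keys; the `get_map` closure is just
    /// a placeholder for obtaining a loaded map:
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// for key in map.keys() {
    ///     println!("key: {key:?}");
    /// }
    /// ```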
    fn keys(&self) -> MapKeyIter<'_> {
        MapKeyIter::new(self.as_fd(), self.key_size())
    }

    /// Returns the map value as a `Vec` of `u8`.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    ///
    /// If the map is one of the per-cpu data structures, the function [`Self::lookup_percpu()`]
    /// must be used.
    /// If the map is of type bloom_filter the function [`Self::lookup_bloom_filter()`] must be used.
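    ///
    /// A hedged usage sketch, assuming a map with 4-byte keys; `get_map` is a
    /// placeholder for obtaining a loaded map:
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let key = 42u32.to_ne_bytes();
    /// match map.lookup(&key, MapFlags::ANY)? {
    ///     Some(value) => println!("value: {value:?}"),
    ///     None => println!("key not present"),
    /// }
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```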
    fn lookup(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<u8>>> {
        if self.map_type().is_bloom_filter() {
            return Err(Error::with_invalid_data(
                "lookup_bloom_filter() must be used for bloom filter maps",
            ));
        }
        if self.map_type().is_percpu() {
            return Err(Error::with_invalid_data(format!(
                "lookup_percpu() must be used for per-cpu maps (type of the map is {:?})",
                self.map_type(),
            )));
        }

        let out_size = self.value_size() as usize;
        lookup_raw(self, key, flags, out_size)
    }

    /// Returns whether the given value is likely present in the bloom filter.
    ///
    /// `value` must have exactly [`Self::value_size()`] elements.
    fn lookup_bloom_filter(&self, value: &[u8]) -> Result<bool> {
        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_elem(
                self.as_fd().as_raw_fd(),
                ptr::null(),
                value.to_vec().as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            Ok(true)
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(false)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Returns one value per cpu as a `Vec` of `Vec<u8>` for per-cpu maps.
    ///
    /// For normal maps, [`Self::lookup()`] must be used.
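    ///
    /// A sketch of reading one value per possible CPU; `get_map` is a
    /// placeholder:
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let key = 0u32.to_ne_bytes();
    /// if let Some(values) = map.lookup_percpu(&key, MapFlags::ANY)? {
    ///     for (cpu, value) in values.iter().enumerate() {
    ///         println!("cpu {cpu}: {value:?}");
    ///     }
    /// }
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```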
    fn lookup_percpu(&self, key: &[u8], flags: MapFlags) -> Result<Option<Vec<Vec<u8>>>> {
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "lookup() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let out_size = percpu_buffer_size(self)?;

        let raw_res = lookup_raw(self, key, flags, out_size)?;
        if let Some(raw_vals) = raw_res {
            let mut out = Vec::new();
            for chunk in raw_vals.chunks_exact(aligned_val_size) {
                out.push(chunk[..val_size].to_vec());
            }
            Ok(Some(out))
        } else {
            Ok(None)
        }
    }

    /// Deletes an element from the map.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
    fn delete(&self, key: &[u8]) -> Result<()> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let ret = unsafe {
            libbpf_sys::bpf_map_delete_elem(self.as_fd().as_raw_fd(), key.as_ptr() as *const c_void)
        };
        util::parse_ret(ret)
    }

    /// Deletes many elements in batch mode from the map.
    ///
    /// `keys` must have exactly [`Self::key_size()` * count] elements.
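    ///
    /// A sketch of deleting two 4-byte keys in one batch; the keys are packed
    /// back to back into a single buffer (`get_map` is a placeholder):
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let mut keys = Vec::new();
    /// keys.extend_from_slice(&1u32.to_ne_bytes());
    /// keys.extend_from_slice(&2u32.to_ne_bytes());
    /// map.delete_batch(&keys, 2, MapFlags::ANY, MapFlags::ANY)?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```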
    fn delete_batch(
        &self,
        keys: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            // bpf_map_batch_opts might have padding fields on some platform
            ..Default::default()
        };

        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_delete_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                (&mut count) as *mut u32,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };
        util::parse_ret(ret)
    }

    /// Same as [`Self::lookup()`] except this also deletes the key from the map.
    ///
    /// Note that this operation is currently only implemented in the kernel for [`MapType::Queue`]
    /// and [`MapType::Stack`].
    ///
    /// `key` must have exactly [`Self::key_size()`] elements.
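    ///
    /// A sketch of popping an element from a [`MapType::Queue`] map; queue and
    /// stack maps have a zero-sized key, so an empty slice is passed (`get_map`
    /// is a placeholder):
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// if let Some(value) = map.lookup_and_delete(&[])? {
    ///     println!("popped: {value:?}");
    /// }
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```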
    fn lookup_and_delete(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        if key.len() != self.key_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "key_size {} != {}",
                key.len(),
                self.key_size()
            )));
        };

        let mut out: Vec<u8> = Vec::with_capacity(self.value_size() as usize);

        let ret = unsafe {
            libbpf_sys::bpf_map_lookup_and_delete_elem(
                self.as_fd().as_raw_fd(),
                map_key(self, key),
                out.as_mut_ptr() as *mut c_void,
            )
        };

        if ret == 0 {
            unsafe {
                out.set_len(self.value_size() as usize);
            }
            Ok(Some(out))
        } else {
            let err = io::Error::last_os_error();
            if err.kind() == io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(Error::from(err))
            }
        }
    }

    /// Update an element.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements. `value` must have exactly
    /// [`Self::value_size()`] elements.
    ///
    /// For per-cpu maps, [`Self::update_percpu()`] must be used.
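    ///
    /// A sketch of inserting a 4-byte key with an 8-byte value (`get_map` is a
    /// placeholder):
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let key = 42u32.to_ne_bytes();
    /// let value = 1234u64.to_ne_bytes();
    /// map.update(&key, &value, MapFlags::ANY)?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```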
    fn update(&self, key: &[u8], value: &[u8], flags: MapFlags) -> Result<()> {
        if self.map_type().is_percpu() {
            return Err(Error::with_invalid_data(format!(
                "update_percpu() must be used for per-cpu maps (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if value.len() != self.value_size() as usize {
            return Err(Error::with_invalid_data(format!(
                "value_size {} != {}",
                value.len(),
                self.value_size()
            )));
        };

        update_raw(self, key, value, flags)
    }

    /// Updates many elements in batch mode in the map.
    ///
    /// `keys` must have exactly [`Self::key_size()` * count] elements. `values` must have exactly
    /// [`Self::value_size()` * count] elements.
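    ///
    /// A sketch of updating two 4-byte keys with two 8-byte values in one batch;
    /// keys and values are packed back to back into flat buffers (`get_map` is a
    /// placeholder):
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let keys = [1u32.to_ne_bytes(), 2u32.to_ne_bytes()].concat();
    /// let values = [10u64.to_ne_bytes(), 20u64.to_ne_bytes()].concat();
    /// map.update_batch(&keys, &values, 2, MapFlags::ANY, MapFlags::ANY)?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```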
    fn update_batch(
        &self,
        keys: &[u8],
        values: &[u8],
        count: u32,
        elem_flags: MapFlags,
        flags: MapFlags,
    ) -> Result<()> {
        if keys.len() as u32 / count != self.key_size() || (keys.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch key_size {} != {} * {}",
                keys.len(),
                self.key_size(),
                count
            )));
        };

        if values.len() as u32 / count != self.value_size() || (values.len() as u32) % count != 0 {
            return Err(Error::with_invalid_data(format!(
                "batch value_size {} != {} * {}",
                values.len(),
                self.value_size(),
                count
            )));
        }

        #[allow(clippy::needless_update)]
        let opts = libbpf_sys::bpf_map_batch_opts {
            sz: mem::size_of::<libbpf_sys::bpf_map_batch_opts>() as _,
            elem_flags: elem_flags.bits(),
            flags: flags.bits(),
            // bpf_map_batch_opts might have padding fields on some platform
            ..Default::default()
        };

        let mut count = count;
        let ret = unsafe {
            libbpf_sys::bpf_map_update_batch(
                self.as_fd().as_raw_fd(),
                keys.as_ptr() as *const c_void,
                values.as_ptr() as *const c_void,
                (&mut count) as *mut u32,
                &opts as *const libbpf_sys::bpf_map_batch_opts,
            )
        };

        util::parse_ret(ret)
    }

    /// Update an element in a per-cpu map with one value per cpu.
    ///
    /// `key` must have exactly [`Self::key_size()`] elements. `values` must have one
    /// element per cpu (see [`num_possible_cpus`][crate::num_possible_cpus])
    /// with exactly [`Self::value_size()`] elements each.
    ///
    /// For non-per-cpu maps, [`Self::update()`] must be used.
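    ///
    /// A sketch of writing a distinct 8-byte value for every possible CPU
    /// (`get_map` is a placeholder):
    /// ```no_run
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapFlags;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let key = 0u32.to_ne_bytes();
    /// let values = (0..libbpf_rs::num_possible_cpus()?)
    ///     .map(|cpu| (cpu as u64).to_ne_bytes().to_vec())
    ///     .collect::<Vec<_>>();
    /// map.update_percpu(&key, &values, MapFlags::ANY)?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```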
    fn update_percpu(&self, key: &[u8], values: &[Vec<u8>], flags: MapFlags) -> Result<()> {
        if !self.map_type().is_percpu() && self.map_type() != MapType::Unknown {
            return Err(Error::with_invalid_data(format!(
                "update() must be used for maps that are not per-cpu (type of the map is {:?})",
                self.map_type(),
            )));
        }

        if values.len() != crate::num_possible_cpus()? {
            return Err(Error::with_invalid_data(format!(
                "number of values {} != number of cpus {}",
                values.len(),
                crate::num_possible_cpus()?
            )));
        };

        let val_size = self.value_size() as usize;
        let aligned_val_size = percpu_aligned_value_size(self);
        let buf_size = percpu_buffer_size(self)?;

        let mut value_buf = vec![0; buf_size];

        for (i, val) in values.iter().enumerate() {
            if val.len() != val_size {
                return Err(Error::with_invalid_data(format!(
                    "value size for cpu {} is {} != {}",
                    i,
                    val.len(),
                    val_size
                )));
            }

            value_buf[(i * aligned_val_size)..(i * aligned_val_size + val_size)]
                .copy_from_slice(val);
        }

        update_raw(self, key, &value_buf, flags)
    }
}

/// An immutable loaded BPF map.
pub type Map<'obj> = MapImpl<'obj>;
/// A mutable loaded BPF map.
pub type MapMut<'obj> = MapImpl<'obj, Mut>;

/// Represents a libbpf-created map.
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
#[repr(transparent)]
pub struct MapImpl<'obj, T = ()> {
    ptr: NonNull<libbpf_sys::bpf_map>,
    _phantom: PhantomData<&'obj T>,
}

impl<'obj> Map<'obj> {
    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`].
    pub fn new(map: &'obj libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *const _ as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Create a [`Map`] from a [`libbpf_sys::bpf_map`] that does not contain a
    /// file descriptor.
    ///
    /// The caller has to ensure that the [`AsFd`] impl is not used, or a panic
    /// will be the result.
    ///
    /// # Safety
    ///
    /// The pointer must point to a loaded map.
    #[doc(hidden)]
    pub unsafe fn from_map_without_fd(ptr: NonNull<libbpf_sys::bpf_map>) -> Self {
        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// Returns whether the map is pinned.
    pub fn is_pinned(&self) -> bool {
        unsafe { libbpf_sys::bpf_map__is_pinned(self.ptr.as_ptr()) }
    }

    /// Returns the pin path if the map is pinned, otherwise `None`.
    pub fn get_pin_path(&self) -> Option<&OsStr> {
        let path_ptr = unsafe { libbpf_sys::bpf_map__pin_path(self.ptr.as_ptr()) };
        if path_ptr.is_null() {
            // means map is not pinned
            return None;
        }
        let path_c_str = unsafe { CStr::from_ptr(path_ptr) };
        Some(OsStr::from_bytes(path_c_str.to_bytes()))
    }
}

impl<'obj> MapMut<'obj> {
    /// Create a [`MapMut`] from a [`libbpf_sys::bpf_map`].
    pub fn new_mut(map: &'obj mut libbpf_sys::bpf_map) -> Self {
        // SAFETY: We inferred the address from a reference, which is always
        //         valid.
        let ptr = unsafe { NonNull::new_unchecked(map as *mut _) };
        assert!(
            map_fd(ptr).is_some(),
            "provided BPF map does not have file descriptor"
        );

        Self {
            ptr,
            _phantom: PhantomData,
        }
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_map__pin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();
        let ret = unsafe { libbpf_sys::bpf_map__unpin(self.ptr.as_ptr(), path_ptr) };
        util::parse_ret(ret)
    }

    /// Attach a struct_ops map.
    pub fn attach_struct_ops(&mut self) -> Result<Link> {
        if self.map_type() != MapType::StructOps {
            return Err(Error::with_invalid_data(format!(
                "Invalid map type ({:?}) for attach_struct_ops()",
                self.map_type(),
            )));
        }

        let ptr = unsafe { libbpf_sys::bpf_map__attach_struct_ops(self.ptr.as_ptr()) };
        let ptr = validate_bpf_ret(ptr).context("failed to attach struct_ops")?;
        // SAFETY: the pointer came from libbpf and has been checked for errors.
        let link = unsafe { Link::new(ptr) };
        Ok(link)
    }
}

impl<'obj> Deref for MapMut<'obj> {
    type Target = Map<'obj>;

    fn deref(&self) -> &Self::Target {
        // SAFETY: `MapImpl` is `repr(transparent)` and so the in-memory
        //         representation of both types is the same.
        unsafe { transmute::<&MapMut<'obj>, &Map<'obj>>(self) }
    }
}

impl<T> AsFd for MapImpl<'_, T> {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        // SANITY: Our map must always have a file descriptor associated with
        //         it.
        let fd = map_fd(self.ptr).unwrap();
        // SAFETY: `fd` is guaranteed to be valid for the lifetime of
        //         the created object.
        let fd = unsafe { BorrowedFd::borrow_raw(fd as _) };
        fd
    }
}

impl<T> MapCore for MapImpl<'_, T>
where
    T: Debug,
{
    fn name(&self) -> &OsStr {
        // SAFETY: We ensured `ptr` is valid during construction.
        let name_ptr = unsafe { libbpf_sys::bpf_map__name(self.ptr.as_ptr()) };
        // SAFETY: `bpf_map__name` can return NULL but only if it's passed
        //          NULL. We know `ptr` is not NULL.
        let name_c_str = unsafe { CStr::from_ptr(name_ptr) };
        OsStr::from_bytes(name_c_str.to_bytes())
    }

    #[inline]
    fn map_type(&self) -> MapType {
        let ty = unsafe { libbpf_sys::bpf_map__type(self.ptr.as_ptr()) };
        MapType::from(ty)
    }

    #[inline]
    fn key_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__key_size(self.ptr.as_ptr()) }
    }

    #[inline]
    fn value_size(&self) -> u32 {
        unsafe { libbpf_sys::bpf_map__value_size(self.ptr.as_ptr()) }
    }
}

impl AsRawLibbpf for Map<'_> {
    type LibbpfType = libbpf_sys::bpf_map;

    /// Retrieve the underlying [`libbpf_sys::bpf_map`].
    #[inline]
    fn as_libbpf_object(&self) -> NonNull<Self::LibbpfType> {
        self.ptr
    }
}

/// A handle to a map. Handles can be duplicated and dropped.
///
/// While it is possible to [create one directly][MapHandle::create], in many cases it is
/// useful to create such a handle from an existing [`Map`]:
/// ```no_run
/// # use libbpf_rs::Map;
/// # use libbpf_rs::MapHandle;
/// # let get_map = || -> &Map { todo!() };
/// let map: &Map = get_map();
/// let map_handle = MapHandle::try_from(map).unwrap();
/// ```
///
/// Some methods require working with raw bytes. You may find libraries such as
/// [`plain`](https://crates.io/crates/plain) helpful.
#[derive(Debug)]
pub struct MapHandle {
    fd: OwnedFd,
    name: OsString,
    ty: MapType,
    key_size: u32,
    value_size: u32,
}

impl MapHandle {
    /// Create a BPF map whose data is not managed by libbpf.
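    ///
    /// A minimal sketch of creating a hash map with 4-byte keys and 8-byte
    /// values; the name, sizes, and entry count shown are arbitrary examples:
    /// ```no_run
    /// # use libbpf_rs::MapHandle;
    /// # use libbpf_rs::MapType;
    /// let opts = libbpf_sys::bpf_map_create_opts {
    ///     sz: std::mem::size_of::<libbpf_sys::bpf_map_create_opts>() as _,
    ///     ..Default::default()
    /// };
    /// let map = MapHandle::create(MapType::Hash, Some("my_map"), 4, 8, 1024, &opts)?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```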
    pub fn create<T: AsRef<OsStr>>(
        map_type: MapType,
        name: Option<T>,
        key_size: u32,
        value_size: u32,
        max_entries: u32,
        opts: &libbpf_sys::bpf_map_create_opts,
    ) -> Result<Self> {
        let name = match name {
            Some(name) => name.as_ref().to_os_string(),
            // Old kernels do not support specifying a map name.
            None => OsString::new(),
        };
        let name_c_str = CString::new(name.as_bytes()).map_err(|_| {
            Error::with_invalid_data(format!("invalid name `{name:?}`: has NUL bytes"))
        })?;
        let name_c_ptr = if name.is_empty() {
            ptr::null()
        } else {
            name_c_str.as_bytes_with_nul().as_ptr()
        };

        let fd = unsafe {
            libbpf_sys::bpf_map_create(
                map_type.into(),
                name_c_ptr.cast(),
                key_size,
                value_size,
                max_entries,
                opts,
            )
        };
        let () = util::parse_ret(fd)?;

        Ok(Self {
            // SAFETY: A file descriptor coming from the `bpf_map_create`
            //         function is always suitable for ownership and can be
            //         cleaned up with close.
            fd: unsafe { OwnedFd::from_raw_fd(fd) },
            name,
            ty: map_type,
            key_size,
            value_size,
        })
    }

    /// Open a previously pinned map from its path.
    ///
    /// # Panics
    /// If the path contains null bytes.
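    ///
    /// A sketch of opening a map pinned under bpffs (the path is just an
    /// example):
    /// ```no_run
    /// # use libbpf_rs::MapCore as _;
    /// # use libbpf_rs::MapHandle;
    /// let map = MapHandle::from_pinned_path("/sys/fs/bpf/my_map")?;
    /// println!("opened {:?} of type {:?}", map.name(), map.map_type());
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```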
    pub fn from_pinned_path<P: AsRef<Path>>(path: P) -> Result<Self> {
        fn inner(path: &Path) -> Result<MapHandle> {
            let p = CString::new(path.as_os_str().as_bytes()).expect("path contained null bytes");
            let fd = parse_ret_i32(unsafe {
                // SAFETY
                // p is never null since we allocated it ourselves.
                libbpf_sys::bpf_obj_get(p.as_ptr())
            })?;
            MapHandle::from_fd(unsafe {
                // SAFETY
                // A file descriptor coming from the bpf_obj_get function is always suitable for
                // ownership and can be cleaned up with close.
                OwnedFd::from_raw_fd(fd)
            })
        }

        inner(path.as_ref())
    }

    /// Open a loaded map from its map id.
    pub fn from_map_id(id: u32) -> Result<Self> {
        parse_ret_i32(unsafe {
            // SAFETY
            // This function is always safe to call.
            libbpf_sys::bpf_map_get_fd_by_id(id)
        })
        .map(|fd| unsafe {
            // SAFETY
            // A file descriptor coming from the bpf_map_get_fd_by_id function is always suitable
            // for ownership and can be cleaned up with close.
            OwnedFd::from_raw_fd(fd)
        })
        .and_then(Self::from_fd)
    }

    fn from_fd(fd: OwnedFd) -> Result<Self> {
        let info = MapInfo::new(fd.as_fd())?;
        Ok(Self {
            fd,
            name: info.name()?.into(),
            ty: info.map_type(),
            key_size: info.info.key_size,
            value_size: info.info.value_size,
        })
    }

    /// Freeze the map as read-only from user space.
    ///
    /// Entries from a frozen map can no longer be updated or deleted with the
    /// bpf() system call. This operation is not reversible, and the map remains
    /// immutable from user space until its destruction. However, read and write
    /// permissions for BPF programs to the map remain unchanged.
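    ///
    /// A sketch of freezing a previously pinned map (the pin path is just an
    /// example):
    /// ```no_run
    /// # use libbpf_rs::MapHandle;
    /// let map = MapHandle::from_pinned_path("/sys/fs/bpf/my_map")?;
    /// map.freeze()?;
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```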
    pub fn freeze(&self) -> Result<()> {
        let ret = unsafe { libbpf_sys::bpf_map_freeze(self.fd.as_raw_fd()) };

        util::parse_ret(ret)
    }

    /// [Pin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map to bpffs.
    pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        let path_c = util::path_to_cstring(path)?;
        let path_ptr = path_c.as_ptr();

        let ret = unsafe { libbpf_sys::bpf_obj_pin(self.fd.as_raw_fd(), path_ptr) };
        util::parse_ret(ret)
    }

    /// [Unpin](https://facebookmicrosites.github.io/bpf/blog/2018/08/31/object-lifetime.html#bpffs)
    /// this map from bpffs.
    pub fn unpin<P: AsRef<Path>>(&mut self, path: P) -> Result<()> {
        remove_file(path).context("failed to remove pinned map")
    }
}

impl MapCore for MapHandle {
    #[inline]
    fn name(&self) -> &OsStr {
        &self.name
    }

    #[inline]
    fn map_type(&self) -> MapType {
        self.ty
    }

    #[inline]
    fn key_size(&self) -> u32 {
        self.key_size
    }

    #[inline]
    fn value_size(&self) -> u32 {
        self.value_size
    }
}

impl AsFd for MapHandle {
    #[inline]
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.fd.as_fd()
    }
}

impl<T> TryFrom<&MapImpl<'_, T>> for MapHandle
where
    T: Debug,
{
    type Error = Error;

    fn try_from(other: &MapImpl<'_, T>) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
        })
    }
}

impl TryFrom<&MapHandle> for MapHandle {
    type Error = Error;

    fn try_from(other: &MapHandle) -> Result<Self> {
        Ok(Self {
            fd: other
                .as_fd()
                .try_clone_to_owned()
                .context("failed to duplicate map file descriptor")?,
            name: other.name().to_os_string(),
            ty: other.map_type(),
            key_size: other.key_size(),
            value_size: other.value_size(),
        })
    }
}

bitflags! {
    /// Flags to configure [`Map`] operations.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct MapFlags: u64 {
        /// See [`libbpf_sys::BPF_ANY`].
        const ANY      = libbpf_sys::BPF_ANY as _;
        /// See [`libbpf_sys::BPF_NOEXIST`].
        const NO_EXIST = libbpf_sys::BPF_NOEXIST as _;
        /// See [`libbpf_sys::BPF_EXIST`].
        const EXIST    = libbpf_sys::BPF_EXIST as _;
        /// See [`libbpf_sys::BPF_F_LOCK`].
        const LOCK     = libbpf_sys::BPF_F_LOCK as _;
    }
}

/// Type of a [`Map`]. Maps to `enum bpf_map_type` in kernel uapi.
// If you add a new per-cpu map, also update `is_percpu`.
#[non_exhaustive]
#[repr(u32)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
// TODO: Document members.
#[allow(missing_docs)]
pub enum MapType {
    Unspec = 0,
    Hash,
    Array,
    ProgArray,
    PerfEventArray,
    PercpuHash,
    PercpuArray,
    StackTrace,
    CgroupArray,
    LruHash,
    LruPercpuHash,
    LpmTrie,
    ArrayOfMaps,
    HashOfMaps,
    Devmap,
    Sockmap,
    Cpumap,
    Xskmap,
    Sockhash,
    CgroupStorage,
    ReuseportSockarray,
    PercpuCgroupStorage,
    Queue,
    Stack,
    SkStorage,
    DevmapHash,
    StructOps,
    RingBuf,
    InodeStorage,
    TaskStorage,
    BloomFilter,
    UserRingBuf,
    /// We choose to specify our own "unknown" type here because it's really up to the kernel
    /// to decide if it wants to reject the map. If it accepts it, it just means whoever is
    /// using this library is a bit out of date.
    Unknown = u32::MAX,
}

impl MapType {
    /// Returns whether the map is one of the per-cpu map types.
    pub fn is_percpu(&self) -> bool {
        matches!(
            self,
            MapType::PercpuArray
                | MapType::PercpuHash
                | MapType::LruPercpuHash
                | MapType::PercpuCgroupStorage
        )
    }

    /// Returns whether the map is of a keyless map type, as per the libbpf
    /// documentation. Keyless map types are: queue, stack, and bloom filter.
    fn is_keyless(&self) -> bool {
        matches!(self, MapType::Queue | MapType::Stack | MapType::BloomFilter)
    }

    /// Returns whether the map is of the bloom filter type.
    pub fn is_bloom_filter(&self) -> bool {
        MapType::BloomFilter.eq(self)
    }

    /// Detects if the host kernel supports this BPF map type.
    ///
    /// Make sure the process has the required set of CAP_* permissions (or runs as
    /// root) when performing feature checking.
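    ///
    /// A sketch of probing the running kernel for ring buffer map support:
    /// ```no_run
    /// # use libbpf_rs::MapType;
    /// if MapType::RingBuf.is_supported()? {
    ///     println!("BPF_MAP_TYPE_RINGBUF is supported");
    /// }
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```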
    pub fn is_supported(&self) -> Result<bool> {
        let ret = unsafe { libbpf_sys::libbpf_probe_bpf_map_type(*self as u32, ptr::null()) };
        match ret {
            0 => Ok(false),
            1 => Ok(true),
            _ => Err(Error::from_raw_os_error(-ret)),
        }
    }
}

impl From<u32> for MapType {
    fn from(value: u32) -> Self {
        use MapType::*;

        match value {
            x if x == Unspec as u32 => Unspec,
            x if x == Hash as u32 => Hash,
            x if x == Array as u32 => Array,
            x if x == ProgArray as u32 => ProgArray,
            x if x == PerfEventArray as u32 => PerfEventArray,
            x if x == PercpuHash as u32 => PercpuHash,
            x if x == PercpuArray as u32 => PercpuArray,
            x if x == StackTrace as u32 => StackTrace,
            x if x == CgroupArray as u32 => CgroupArray,
            x if x == LruHash as u32 => LruHash,
            x if x == LruPercpuHash as u32 => LruPercpuHash,
            x if x == LpmTrie as u32 => LpmTrie,
            x if x == ArrayOfMaps as u32 => ArrayOfMaps,
            x if x == HashOfMaps as u32 => HashOfMaps,
            x if x == Devmap as u32 => Devmap,
            x if x == Sockmap as u32 => Sockmap,
            x if x == Cpumap as u32 => Cpumap,
            x if x == Xskmap as u32 => Xskmap,
            x if x == Sockhash as u32 => Sockhash,
            x if x == CgroupStorage as u32 => CgroupStorage,
            x if x == ReuseportSockarray as u32 => ReuseportSockarray,
            x if x == PercpuCgroupStorage as u32 => PercpuCgroupStorage,
            x if x == Queue as u32 => Queue,
            x if x == Stack as u32 => Stack,
            x if x == SkStorage as u32 => SkStorage,
            x if x == DevmapHash as u32 => DevmapHash,
            x if x == StructOps as u32 => StructOps,
            x if x == RingBuf as u32 => RingBuf,
            x if x == InodeStorage as u32 => InodeStorage,
            x if x == TaskStorage as u32 => TaskStorage,
            x if x == BloomFilter as u32 => BloomFilter,
            x if x == UserRingBuf as u32 => UserRingBuf,
            _ => Unknown,
        }
    }
}

impl From<MapType> for u32 {
    fn from(value: MapType) -> Self {
        value as u32
    }
}

/// An iterator over the keys of a BPF map.
#[derive(Debug)]
pub struct MapKeyIter<'map> {
    map_fd: BorrowedFd<'map>,
    prev: Option<Vec<u8>>,
    next: Vec<u8>,
}

impl<'map> MapKeyIter<'map> {
    fn new(map_fd: BorrowedFd<'map>, key_size: u32) -> Self {
        Self {
            map_fd,
            prev: None,
            next: vec![0; key_size as usize],
        }
    }
}

impl Iterator for MapKeyIter<'_> {
    type Item = Vec<u8>;

    fn next(&mut self) -> Option<Self::Item> {
        let prev = self.prev.as_ref().map_or(ptr::null(), |p| p.as_ptr());

        let ret = unsafe {
            libbpf_sys::bpf_map_get_next_key(
                self.map_fd.as_raw_fd(),
                prev as _,
                self.next.as_mut_ptr() as _,
            )
        };
        if ret != 0 {
            None
        } else {
            self.prev = Some(self.next.clone());
            Some(self.next.clone())
        }
    }
}

/// A convenience wrapper for [`bpf_map_info`][libbpf_sys::bpf_map_info]. It
/// provides the ability to retrieve the details of a certain map.
#[derive(Debug)]
pub struct MapInfo {
    /// The inner [`bpf_map_info`][libbpf_sys::bpf_map_info] object.
    pub info: bpf_map_info,
}

impl MapInfo {
    /// Create a `MapInfo` object from a file descriptor.
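    ///
    /// A sketch of retrieving kernel-side information for an existing map
    /// (`get_map` is a placeholder):
    /// ```no_run
    /// # use std::os::fd::AsFd as _;
    /// # use libbpf_rs::Map;
    /// # use libbpf_rs::MapInfo;
    /// # let get_map = || -> &Map { todo!() };
    /// let map: &Map = get_map();
    /// let info = MapInfo::new(map.as_fd())?;
    /// println!("max_entries: {}", info.info.max_entries);
    /// # Ok::<(), libbpf_rs::Error>(())
    /// ```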
    pub fn new(fd: BorrowedFd<'_>) -> Result<Self> {
        let mut map_info = bpf_map_info::default();
        let mut size = mem::size_of_val(&map_info) as u32;
        // SAFETY: All pointers are derived from references and hence valid.
        let () = util::parse_ret(unsafe {
            bpf_obj_get_info_by_fd(
                fd.as_raw_fd(),
                &mut map_info as *mut bpf_map_info as *mut c_void,
                &mut size as *mut u32,
            )
        })?;
        Ok(Self { info: map_info })
    }

    /// Get the map type.
    #[inline]
    pub fn map_type(&self) -> MapType {
        MapType::from(self.info.type_)
    }

    /// Get the name of this map.
    ///
    /// Returns an error if the underlying data in the structure is not a valid
    /// UTF-8 string.
    pub fn name<'a>(&self) -> Result<&'a str> {
        // SAFETY: convert &[i8] to &[u8], and then cast that to &str. i8 and u8 have the same size.
        let char_slice =
            unsafe { from_raw_parts(self.info.name[..].as_ptr().cast(), self.info.name.len()) };

        util::c_char_slice_to_cstr(char_slice)
            .ok_or_else(|| Error::with_invalid_data("no nul byte found"))?
            .to_str()
            .map_err(Error::with_invalid_data)
    }

    /// Get the map flags.
    #[inline]
    pub fn flags(&self) -> MapFlags {
        MapFlags::from_bits_truncate(self.info.map_flags as u64)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::mem::discriminant;

    #[test]
    fn map_type() {
        use MapType::*;

        for t in [
            Unspec,
            Hash,
            Array,
            ProgArray,
            PerfEventArray,
            PercpuHash,
            PercpuArray,
            StackTrace,
            CgroupArray,
            LruHash,
            LruPercpuHash,
            LpmTrie,
            ArrayOfMaps,
            HashOfMaps,
            Devmap,
            Sockmap,
            Cpumap,
            Xskmap,
            Sockhash,
            CgroupStorage,
            ReuseportSockarray,
            PercpuCgroupStorage,
            Queue,
            Stack,
            SkStorage,
            DevmapHash,
            StructOps,
            RingBuf,
            InodeStorage,
            TaskStorage,
            BloomFilter,
            UserRingBuf,
            Unknown,
        ] {
            // check if discriminants match after a roundtrip conversion
            assert_eq!(discriminant(&t), discriminant(&MapType::from(t as u32)));
        }
    }
}