diff --git a/bpf/aya-bpf/src/maps/hash_map.rs b/bpf/aya-bpf/src/maps/hash_map.rs
index 5877be90..a6e6b28a 100644
--- a/bpf/aya-bpf/src/maps/hash_map.rs
+++ b/bpf/aya-bpf/src/maps/hash_map.rs
@@ -2,6 +2,7 @@ use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+    BPF_MAP_TYPE_XSKMAP,
 };
 use aya_bpf_cty::{c_long, c_void};
@@ -307,6 +308,80 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     }
 }
 
+#[repr(transparent)]
+pub struct XskMap<K, V> {
+    def: UnsafeCell<bpf_map_def>,
+    _k: PhantomData<K>,
+    _v: PhantomData<V>,
+}
+
+unsafe impl<K: Sync, V: Sync> Sync for XskMap<K, V> {}
+
+impl<K, V> XskMap<K, V> {
+    pub const fn with_max_entries(max_entries: u32, flags: u32) -> XskMap<K, V> {
+        XskMap {
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_XSKMAP,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
+            _k: PhantomData,
+            _v: PhantomData,
+        }
+    }
+
+    pub const fn pinned(max_entries: u32, flags: u32) -> XskMap<K, V> {
+        XskMap {
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_XSKMAP,
+                max_entries,
+                flags,
+                PinningType::ByName,
+            )),
+            _k: PhantomData,
+            _v: PhantomData,
+        }
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// make guarantees about the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
+    }
+}
+
 const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
     bpf_map_def {
         type_: ty,