use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};

use aya_bpf_bindings::bindings::bpf_map_type::{
    BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
};
use aya_bpf_cty::{c_long, c_void};

use crate::{
    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_HASH},
    helpers::{bpf_map_delete_elem, bpf_map_lookup_elem, bpf_map_update_elem},
    maps::PinningType,
};

#[repr(transparent)]
pub struct HashMap<K, V> {
    def: UnsafeCell<bpf_map_def>,
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}

impl<K, V> HashMap<K, V> {
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
        HashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
        HashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
    /// does not guarantee the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in the case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the
    /// caller to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
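// Usage sketch (illustrative only, not part of this module): a BPF program would normally
// declare the map as a static and mark it with the `#[map]` attribute from the crate's macros
// so the loader can find it. The map name `CONNECTION_COUNTS` and the surrounding function are
// assumptions made for this example.
//
//     use aya_bpf::{cty::c_long, macros::map, maps::HashMap};
//
//     #[map]
//     static CONNECTION_COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);
//
//     fn try_track(pid: u32) -> Result<(), c_long> {
//         // `get` is unsafe: see the caveat documented on `HashMap::get` above.
//         let next = unsafe { CONNECTION_COUNTS.get(&pid) }.copied().unwrap_or(0) + 1;
//         // Flag 0 (the kernel's `BPF_ANY`) creates the entry or updates it if it exists.
//         CONNECTION_COUNTS.insert(&pid, &next, 0)
//     }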
#[repr(transparent)]
pub struct LruHashMap<K, V> {
    def: UnsafeCell<bpf_map_def>,
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}

impl<K, V> LruHashMap<K, V> {
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
        LruHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
        LruHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
    /// does not guarantee the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in the case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the
    /// caller to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}

#[repr(transparent)]
pub struct PerCpuHashMap<K, V> {
    def: UnsafeCell<bpf_map_def>,
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

unsafe impl<K: Sync, V: Sync> Sync for PerCpuHashMap<K, V> {}

impl<K, V> PerCpuHashMap<K, V> {
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
        PerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
        PerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
    /// does not guarantee the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in the case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the
    /// caller to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}
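// Usage sketch (illustrative only): per-CPU hash maps give every CPU its own copy of the value,
// so a program can bump a counter in place without cross-CPU contention; userspace is expected
// to read and aggregate the per-CPU values. The map name `PKT_COUNTS` and the function are
// assumptions made for this example.
//
//     #[map]
//     static PKT_COUNTS: PerCpuHashMap<u32, u64> = PerCpuHashMap::with_max_entries(256, 0);
//
//     fn count_packet(port: u32) {
//         if let Some(count) = PKT_COUNTS.get_ptr_mut(&port) {
//             // Only this CPU writes this slot, but the caveats documented on
//             // `get_ptr_mut` still apply.
//             unsafe { *count += 1 };
//         } else {
//             let _ = PKT_COUNTS.insert(&port, &1u64, 0);
//         }
//     }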
#[repr(transparent)]
pub struct LruPerCpuHashMap<K, V> {
    def: UnsafeCell<bpf_map_def>,
    _k: PhantomData<K>,
    _v: PhantomData<V>,
}

unsafe impl<K: Sync, V: Sync> Sync for LruPerCpuHashMap<K, V> {}

impl<K, V> LruPerCpuHashMap<K, V> {
    pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
        LruPerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::None,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
        LruPerCpuHashMap {
            def: UnsafeCell::new(build_def::<K, V>(
                BPF_MAP_TYPE_LRU_PERCPU_HASH,
                max_entries,
                flags,
                PinningType::ByName,
            )),
            _k: PhantomData,
            _v: PhantomData,
        }
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
    /// does not guarantee the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in the case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the
    /// caller to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
    }
}

// Build the `bpf_map_def` describing a map with keys of type `K` and values of type `V`.
const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
    bpf_map_def {
        type_: ty,
        key_size: mem::size_of::<K>() as u32,
        value_size: mem::size_of::<V>() as u32,
        max_entries,
        map_flags: flags,
        id: 0,
        pinning: pin as u32,
    }
}

#[inline]
fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
    unsafe {
        let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
        // FIXME: alignment
        NonNull::new(value as *mut V).map(|p| p.as_ptr())
    }
}

#[inline]
fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
    get_ptr_mut(def, key).map(|p| p as *const V)
}

#[inline]
unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
    get_ptr(def, key).map(|p| &*p)
}

#[inline]
fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
    let ret = unsafe {
        bpf_map_update_elem(
            def as *mut _,
            key as *const _ as *const _,
            value as *const _ as *const _,
            flags,
        )
    };
    // The helper returns 0 on success and a negative error code on failure.
    (ret == 0).then_some(()).ok_or(ret)
}

#[inline]
fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
    let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
    (ret == 0).then_some(()).ok_or(ret)
}
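// Sketch of the `flags` argument accepted by `insert` (values are the kernel's own update
// constants from `include/uapi/linux/bpf.h`; the map name `MY_MAP` and the variables `key` and
// `value` are hypothetical):
//
//     const BPF_ANY: u64 = 0;     // create the entry or update it if it already exists
//     const BPF_NOEXIST: u64 = 1; // create only; fails if the key is already present
//     const BPF_EXIST: u64 = 2;   // update only; fails if the key is absent
//
//     // Insert only if the key is not already in the map:
//     if let Err(ret) = MY_MAP.insert(&key, &value, BPF_NOEXIST) {
//         // `ret` is the negative error code returned by `bpf_map_update_elem`.
//     }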