@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -13,15 +13,22 @@ use crate::{
 
 #[repr(transparent)]
 pub struct HashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
+
 impl<K, V> HashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -29,44 +36,73 @@ impl<K, V> HashMap<K, V> {
 
     pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::ByName),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::ByName,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
+
 impl<K, V> LruHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_LRU_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_LRU_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -74,54 +110,73 @@ impl<K, V> LruHashMap<K, V> {
 
     pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct PerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
+
 impl<K, V> PerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -129,54 +184,73 @@ impl<K, V> PerCpuHashMap<K, V> {
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
    }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruPerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
+
 impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -184,35 +258,52 @@ impl<K, V> LruPerCpuHashMap<K, V> {
 
     pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
@@ -229,28 +320,29 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }
 
 #[inline]
-fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
     unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
+        let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        NonNull::new(value as *mut V).map(|p| p.as_ref())
+        NonNull::new(value as *mut V).map(|p| p.as_ptr())
     }
 }
 
 #[inline]
-fn get_mut<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a mut V> {
-    unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-        // FIXME: alignment
-        NonNull::new(value as *mut V).map(|mut p| p.as_mut())
-    }
+fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
+    get_ptr_mut(def, key).map(|p| p as *const V)
+}
+
+#[inline]
+unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
+    get_ptr(def, key).map(|p| &*p)
 }
 
 #[inline]
-fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
     let ret = unsafe {
         bpf_map_update_elem(
-            def as *mut _ as *mut _,
+            def as *mut _,
             key as *const _ as *const _,
             value as *const _ as *const _,
             flags,
@@ -260,8 +352,7 @@ fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result
 }
 
 #[inline]
-fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let ret =
-        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
     (ret >= 0).then(|| ()).ok_or(ret)
 }
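
Usage note (not part of the patch): the sketch below shows how an eBPF program might call the &self-based API above once the maps are Sync and can be declared as plain statics. The map name, key and value types, and the tracepoint hook are hypothetical, chosen only to illustrate insert, unsafe get, and remove; it assumes the existing aya_bpf::macros::{map, tracepoint} attributes and programs::TracePointContext from aya-bpf.

// Illustrative fragment of an aya-bpf program crate (assumes the usual #![no_std] eBPF setup).
use aya_bpf::{
    macros::{map, tracepoint},
    maps::HashMap,
    programs::TracePointContext,
};

// Because HashMap is now Sync, the map can be a plain `static` rather than `static mut`.
#[map]
static EVENT_COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

#[tracepoint]
pub fn count_event(_ctx: TracePointContext) -> u32 {
    let key = 0u32;
    // insert and remove take &self, so no mutable access to the static is needed.
    let _ = EVENT_COUNTS.insert(&key, &1u64, 0);
    // get is unsafe because, without BPF_F_NO_PREALLOC, the kernel may recycle the
    // backing entry (see the doc comments added in the diff), so the caller opts in.
    let current = unsafe { EVENT_COUNTS.get(&key) }.copied().unwrap_or(0);
    // A program that no longer needs the counter could drop it with:
    //     let _ = EVENT_COUNTS.remove(&key);
    current as u32
}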