Merge pull request #290 from ishitatsuyuki/immut-self

bpf: Replace map types to use &self, remove HashMap::get_mut
Alessandro Decina committed 2 years ago via GitHub
commit 1eb9ef5488

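Editor's note: the diff below converts every map type in aya-bpf from `&mut self` methods to `&self`. Each map's inner `bpf_map_def` is wrapped in an `UnsafeCell`, the map types gain an `unsafe impl Sync`, and the BPF helper calls receive the definition pointer via `UnsafeCell::get()`. For callers this removes the need for `static mut` map declarations. A minimal before/after sketch follows; the map name and types are illustrative, not taken from this PR, and crate boilerplate (`#![no_std]`, `#![no_main]`, panic handler) is omitted here and in the sketches after each file.

use aya_bpf::{macros::map, maps::HashMap};

// Before this change: methods took &mut self, forcing `static mut` and an
// unsafe mutable access to a global for every map operation.
// #[map]
// static mut COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

// After this change: a plain static suffices, because the map types are
// Sync and their methods take &self.
#[map]
static COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);
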
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -10,14 +10,16 @@ use crate::{
 #[repr(transparent)]
 pub struct Array<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for Array<T> {}
+
 impl<T> Array<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
         Array {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -25,14 +27,14 @@ impl<T> Array<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
         Array {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -40,15 +42,15 @@ impl<T> Array<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn get(&mut self, index: u32) -> Option<&T> {
+    pub fn get(&self, index: u32) -> Option<&T> {
         unsafe {
             let value = bpf_map_lookup_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 &index as *const _ as *const c_void,
             );
             // FIXME: alignment

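With `Array::get` now taking `&self`, a lookup reads naturally from a plain static. A minimal XDP sketch under the new API (names are illustrative):

use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::Array, programs::XdpContext};

#[map]
static CONFIG: Array<u32> = Array::with_max_entries(1, 0);

#[xdp]
pub fn filter(_ctx: XdpContext) -> u32 {
    // get() takes &self and returns Option<&u32>; no `static mut` and no
    // unsafe block are needed for the lookup itself.
    let _threshold = CONFIG.get(0).copied().unwrap_or(0);
    xdp_action::XDP_PASS
}
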
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -13,15 +13,22 @@ use crate::{
 #[repr(transparent)]
 pub struct HashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
+
 impl<K, V> HashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -29,44 +36,73 @@ impl<K, V> HashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::ByName),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::ByName,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
+    /// makes no guarantee about the atomicity of `insert` or `remove`, and any element removed
+    /// from the map might get aliased by another element in the map, causing garbage to be
+    /// read, or corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies; additionally, care should be taken to avoid concurrent
+    /// writes, but it's up to the caller to decide whether it's safe to dereference the pointer
+    /// or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
+
 impl<K, V> LruHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_LRU_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_LRU_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -74,54 +110,73 @@ impl<K, V> LruHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
+    /// makes no guarantee about the atomicity of `insert` or `remove`, and any element removed
+    /// from the map might get aliased by another element in the map, causing garbage to be
+    /// read, or corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies; additionally, care should be taken to avoid concurrent
+    /// writes, but it's up to the caller to decide whether it's safe to dereference the pointer
+    /// or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct PerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
+
 impl<K, V> PerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -129,54 +184,73 @@ impl<K, V> PerCpuHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
+    /// makes no guarantee about the atomicity of `insert` or `remove`, and any element removed
+    /// from the map might get aliased by another element in the map, causing garbage to be
+    /// read, or corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies; additionally, care should be taken to avoid concurrent
+    /// writes, but it's up to the caller to decide whether it's safe to dereference the pointer
+    /// or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruPerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
+
 impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -184,35 +258,52 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel
+    /// makes no guarantee about the atomicity of `insert` or `remove`, and any element removed
+    /// from the map might get aliased by another element in the map, causing garbage to be
+    /// read, or corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies; additionally, care should be taken to avoid concurrent
+    /// writes, but it's up to the caller to decide whether it's safe to dereference the pointer
+    /// or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
     }
 }
@@ -229,28 +320,29 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningTyp
 }
 
 #[inline]
-fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
     unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
+        let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        NonNull::new(value as *mut V).map(|p| p.as_ref())
+        NonNull::new(value as *mut V).map(|p| p.as_ptr())
     }
 }
 
 #[inline]
-fn get_mut<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a mut V> {
-    unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-        // FIXME: alignment
-        NonNull::new(value as *mut V).map(|mut p| p.as_mut())
-    }
+fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
+    get_ptr_mut(def, key).map(|p| p as *const V)
+}
+
+#[inline]
+unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
+    get_ptr(def, key).map(|p| &*p)
 }
 
 #[inline]
-fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
     let ret = unsafe {
         bpf_map_update_elem(
-            def as *mut _ as *mut _,
+            def as *mut _,
             key as *const _ as *const _,
             value as *const _ as *const _,
             flags,
@@ -260,8 +352,7 @@ fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Resul
 }
 
 #[inline]
-fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let ret =
-        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
     (ret >= 0).then(|| ()).ok_or(ret)
 }

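The HashMap API now splits into an unsafe reference-returning `get` and safe pointer-returning `get_ptr`/`get_ptr_mut` (the latter replacing the removed `get_mut`). A sketch of how a caller might use them, with a hard-coded key standing in for real packet parsing:

use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::HashMap, programs::XdpContext};

#[map]
static PKT_COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

#[xdp]
pub fn count(_ctx: XdpContext) -> u32 {
    let saddr: u32 = 0x0a00_0001; // placeholder key

    // get() is unsafe: without BPF_F_NO_PREALLOC the kernel can recycle a
    // deleted entry while the returned reference is still alive.
    let next = unsafe { PKT_COUNTS.get(&saddr) }.map(|c| *c + 1).unwrap_or(1);
    let _ = PKT_COUNTS.insert(&saddr, &next, 0);

    // get_ptr_mut() is the safe-to-call replacement for get_mut(); the
    // dereference is the caller's unsafe decision.
    if let Some(p) = PKT_COUNTS.get_ptr_mut(&saddr) {
        unsafe { *p += 1 };
    }
    xdp_action::XDP_PASS
}
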
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::{c_long, c_void};
 
@@ -10,11 +10,13 @@ use crate::{
 #[repr(transparent)]
 pub struct LpmTrie<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LpmTrie<K, V> {}
+
 #[repr(packed)]
 pub struct Key<K> {
     /// Represents the number of bytes matched against.
@@ -32,7 +34,12 @@ impl<K> Key<K> {
 impl<K, V> LpmTrie<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
         LpmTrie {
-            def: build_def::<K, V>(BPF_MAP_TYPE_LPM_TRIE, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_LPM_TRIE,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -40,34 +47,32 @@ impl<K, V> LpmTrie<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
         LpmTrie {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LPM_TRIE,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
     #[inline]
-    pub fn get(&mut self, key: &Key<K>) -> Option<&V> {
+    pub fn get(&self, key: &Key<K>) -> Option<&V> {
         unsafe {
-            let value = bpf_map_lookup_elem(
-                &mut self.def as *mut _ as *mut _,
-                key as *const _ as *const c_void,
-            );
+            let value =
+                bpf_map_lookup_elem(self.def.get() as *mut _, key as *const _ as *const c_void);
             // FIXME: alignment
             NonNull::new(value as *mut V).map(|p| p.as_ref())
         }
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
         let ret = unsafe {
             bpf_map_update_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *const _ as *const _,
                 value as *const _ as *const _,
                 flags,
@@ -77,12 +82,9 @@ impl<K, V> LpmTrie<K, V> {
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &Key<K>) -> Result<(), c_long> {
+    pub fn remove(&self, key: &Key<K>) -> Result<(), c_long> {
         let ret = unsafe {
-            bpf_map_delete_elem(
-                &mut self.def as *mut _ as *mut _,
-                key as *const _ as *const c_void,
-            )
+            bpf_map_delete_elem(self.def.get() as *mut _, key as *const _ as *const c_void)
         };
         (ret >= 0).then(|| ()).ok_or(ret)
     }

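`LpmTrie::get` also takes `&self` now. A sketch of a longest-prefix lookup; note that the kernel requires the `BPF_F_NO_PREALLOC` flag for LPM tries, and the `Key` constructor and flag binding paths used here are assumptions about the crate, not shown in this diff:

use aya_bpf::{bindings::BPF_F_NO_PREALLOC, macros::map, maps::lpm_trie::{Key, LpmTrie}};

#[map]
static ALLOWLIST: LpmTrie<u32, u8> = LpmTrie::with_max_entries(128, BPF_F_NO_PREALLOC);

fn is_allowed(addr_be: u32) -> bool {
    // Query with a /32 key (prefix length in bits); for IP lookups the key
    // data is expected in network byte order.
    let key = Key::new(32, addr_be);
    ALLOWLIST.get(&key).is_some()
}
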
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -10,14 +10,16 @@ use crate::{
 #[repr(transparent)]
 pub struct PerCpuArray<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for PerCpuArray<T> {}
+
 impl<T> PerCpuArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERCPU_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -25,14 +27,14 @@ impl<T> PerCpuArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERCPU_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -40,13 +42,13 @@ impl<T> PerCpuArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     #[inline(always)]
-    pub fn get(&mut self, index: u32) -> Option<&T> {
+    pub fn get(&self, index: u32) -> Option<&T> {
         unsafe {
             // FIXME: alignment
             self.lookup(index).map(|p| p.as_ref())
@@ -54,7 +56,7 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+    pub fn get_mut(&self, index: u32) -> Option<&mut T> {
         unsafe {
             // FIXME: alignment
             self.lookup(index).map(|mut p| p.as_mut())
@@ -62,9 +64,9 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
+    unsafe fn lookup(&self, index: u32) -> Option<NonNull<T>> {
         let ptr = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             &index as *const _ as *const c_void,
         );
         NonNull::new(ptr as *mut T)

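`PerCpuArray::get_mut` now takes `&self` yet still hands out `&mut T`; each CPU sees its own copy of the slot, and a BPF program cannot be preempted by itself on the same CPU. A typical scratch-buffer sketch (names illustrative):

use aya_bpf::{macros::map, maps::PerCpuArray};

#[map]
static SCRATCH: PerCpuArray<[u8; 64]> = PerCpuArray::with_max_entries(1, 0);

fn with_scratch() {
    // One 64-byte slot per CPU; index 0 addresses this CPU's copy.
    if let Some(buf) = SCRATCH.get_mut(0) {
        buf[0] = 0;
    }
}
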
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@@ -9,10 +9,12 @@ use crate::{
 #[repr(transparent)]
 pub struct PerfEventArray<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for PerfEventArray<T> {}
+
 impl<T> PerfEventArray<T> {
     pub const fn new(flags: u32) -> PerfEventArray<T> {
         PerfEventArray::with_max_entries(0, flags)
@@ -20,7 +22,7 @@ impl<T> PerfEventArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -28,14 +30,14 @@ impl<T> PerfEventArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -43,21 +45,21 @@ impl<T> PerfEventArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &T, flags: u32) {
+    pub fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u32) {
         self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
     }
 
-    pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &T, flags: u32) {
+    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) {
         let flags = (flags as u64) << 32 | index as u64;
         unsafe {
             bpf_perf_event_output(
                 ctx.as_ptr(),
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 flags,
                 data as *const _ as *mut _,
                 mem::size_of::<T>() as u64,

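`PerfEventArray::output` taking `&self` means event submission also works from a plain static. A tracepoint sketch with a made-up event struct (field values hard-coded for brevity):

use aya_bpf::{macros::{map, tracepoint}, maps::PerfEventArray, programs::TracePointContext};

#[repr(C)]
struct Event {
    pid: u32,
    uid: u32,
}

#[map]
static EVENTS: PerfEventArray<Event> = PerfEventArray::new(0);

#[tracepoint]
pub fn on_exec(ctx: TracePointContext) -> u32 {
    let evt = Event { pid: 0, uid: 0 }; // placeholder values
    // output() submits to the current CPU's ring via BPF_F_CURRENT_CPU.
    EVENTS.output(&ctx, &evt, 0);
    0
}
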
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@@ -9,9 +9,11 @@ use crate::{
 #[repr(transparent)]
 pub struct PerfEventByteArray {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for PerfEventByteArray {}
+
 impl PerfEventByteArray {
     pub const fn new(flags: u32) -> PerfEventByteArray {
         PerfEventByteArray::with_max_entries(0, flags)
@@ -19,7 +21,7 @@ impl PerfEventByteArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -27,13 +29,13 @@ impl PerfEventByteArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -41,20 +43,20 @@ impl PerfEventByteArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
-    pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &[u8], flags: u32) {
+    pub fn output<C: BpfContext>(&self, ctx: &C, data: &[u8], flags: u32) {
         self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
     }
 
-    pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &[u8], flags: u32) {
+    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &[u8], flags: u32) {
         let flags = (flags as u64) << 32 | index as u64;
         unsafe {
             bpf_perf_event_output(
                 ctx.as_ptr(),
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 flags,
                 data.as_ptr() as *mut _,
                 data.len() as u64,

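The byte-array variant mirrors this with `&[u8]` payloads. A minimal sketch (probe name and payload invented):

use aya_bpf::{macros::{map, uprobe}, maps::PerfEventByteArray, programs::ProbeContext};

#[map]
static LOGS: PerfEventByteArray = PerfEventByteArray::new(0);

#[uprobe]
pub fn log_hit(ctx: ProbeContext) -> u32 {
    // Same &self-based output(), but the payload is a raw byte slice.
    LOGS.output(&ctx, b"hit\0", 0);
    0
}
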
@@ -1,4 +1,4 @@
-use core::{hint::unreachable_unchecked, mem};
+use core::{cell::UnsafeCell, hint::unreachable_unchecked, mem};
 
 use aya_bpf_cty::c_long;
 
@@ -19,7 +19,7 @@ use crate::{
 /// # use aya_bpf::{programs::LsmContext};
 ///
 /// #[map]
-/// static mut JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
+/// static JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
 ///
 /// # unsafe fn try_test(ctx: &LsmContext) -> Result<(), c_long> {
 /// let index: u32 = 13;
@@ -33,13 +33,15 @@ use crate::{
 /// ```
 #[repr(transparent)]
 pub struct ProgramArray {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for ProgramArray {}
+
 impl ProgramArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PROG_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -47,13 +49,13 @@ impl ProgramArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PROG_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -61,7 +63,7 @@ impl ProgramArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
@@ -78,8 +80,8 @@ impl ProgramArray {
     ///
     /// On success, this function **does not return** into the original program.
     /// On failure, a negative error is returned, wrapped in `Err()`.
-    pub unsafe fn tail_call<C: BpfContext>(&mut self, ctx: &C, index: u32) -> Result<!, c_long> {
-        let res = bpf_tail_call(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, index);
+    pub unsafe fn tail_call<C: BpfContext>(&self, ctx: &C, index: u32) -> Result<!, c_long> {
+        let res = bpf_tail_call(ctx.as_ptr(), self.def.get() as *mut _, index);
         if res != 0 {
             Err(res)
         } else {

@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_QUEUE},
@@ -8,14 +8,16 @@ use crate::{
 #[repr(transparent)]
 pub struct Queue<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for Queue<T> {}
+
 impl<T> Queue<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_QUEUE,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
@@ -23,14 +25,14 @@ impl<T> Queue<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_QUEUE,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
@@ -38,15 +40,15 @@ impl<T> Queue<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+    pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
         let ret = unsafe {
             bpf_map_push_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 value as *const _ as *const _,
                 flags,
             )
@@ -54,13 +56,10 @@ impl<T> Queue<T> {
         (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub fn pop(&mut self) -> Option<T> {
+    pub fn pop(&self) -> Option<T> {
         unsafe {
             let mut value = mem::MaybeUninit::uninit();
-            let ret = bpf_map_pop_elem(
-                &mut self.def as *mut _ as *mut _,
-                value.as_mut_ptr() as *mut _,
-            );
+            let ret = bpf_map_pop_elem(self.def.get() as *mut _, value.as_mut_ptr() as *mut _);
             (ret >= 0).then(|| value.assume_init())
         }
     }

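`Queue::push` and `Queue::pop` follow the same pattern; `BPF_MAP_TYPE_QUEUE` is keyless and `pop` returns the value by copy. A sketch:

use aya_bpf::{macros::map, maps::Queue};

#[map]
static WORK: Queue<u64> = Queue::with_max_entries(1024, 0);

fn roundtrip(item: u64) -> Option<u64> {
    // push() can fail (e.g. when the queue is full); pop() returns None
    // when the queue is empty.
    let _ = WORK.push(&item, 0);
    WORK.pop()
}
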
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use aya_bpf_cty::c_void;
 
@@ -15,14 +15,16 @@ use crate::{
 #[repr(transparent)]
 pub struct SockHash<K> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
 }
 
+unsafe impl<K: Sync> Sync for SockHash<K> {}
+
 impl<K> SockHash<K> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKHASH,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -30,14 +32,14 @@ impl<K> SockHash<K> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _k: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKHASH,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -45,21 +47,16 @@ impl<K> SockHash<K> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _k: PhantomData,
         }
     }
 
-    pub fn update(
-        &mut self,
-        key: &mut K,
-        sk_ops: &mut bpf_sock_ops,
-        flags: u64,
-    ) -> Result<(), i64> {
+    pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
         let ret = unsafe {
             bpf_sock_hash_update(
                 sk_ops as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut c_void,
                 flags,
             )
@@ -67,22 +64,22 @@ impl<K> SockHash<K> {
         (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
+    pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
         unsafe {
             bpf_msg_redirect_hash(
                 ctx.as_ptr() as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut _,
                 flags,
             )
         }
     }
 
-    pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
+    pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
         unsafe {
             bpf_sk_redirect_hash(
                 ctx.as_ptr() as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut _,
                 flags,
             )

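A `SockHash` is typically populated from a sock_ops program and consumed from an sk_msg program via `redirect_msg`. A sketch of the consuming side under the new `&self` API; the `#[sk_msg]` attribute and `sk_action` binding are assumptions about the surrounding crate, not shown in this diff:

use aya_bpf::{bindings::sk_action, macros::{map, sk_msg}, maps::SockHash, programs::SkMsgContext};

#[map]
static SOCKS: SockHash<u32> = SockHash::with_max_entries(64, 0);

#[sk_msg]
pub fn route(ctx: SkMsgContext) -> u32 {
    // redirect_msg() wants &mut K; a real program would derive the key
    // from the message's 4-tuple rather than hard-coding it.
    let mut key: u32 = 0;
    SOCKS.redirect_msg(&ctx, &mut key, 0);
    sk_action::SK_PASS
}
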
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use aya_bpf_cty::c_void;
 
@@ -15,13 +15,15 @@ use crate::{
 #[repr(transparent)]
 pub struct SockMap {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for SockMap {}
+
 impl SockMap {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKMAP,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -29,13 +31,13 @@ impl SockMap {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKMAP,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -43,19 +45,19 @@ impl SockMap {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
     pub unsafe fn update(
-        &mut self,
+        &self,
         mut index: u32,
         sk_ops: *mut bpf_sock_ops,
         flags: u64,
     ) -> Result<(), i64> {
         let ret = bpf_sock_map_update(
             sk_ops,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             &mut index as *mut _ as *mut c_void,
             flags,
         );
@@ -66,19 +68,19 @@ impl SockMap {
         }
     }
 
-    pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
+    pub unsafe fn redirect_msg(&self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
         bpf_msg_redirect_map(
             ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             index,
             flags,
         )
     }
 
-    pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
+    pub unsafe fn redirect_skb(&self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
         bpf_sk_redirect_map(
             ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             index,
             flags,
         )

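`SockMap::update` stays unsafe because it takes a raw `*mut bpf_sock_ops`. A sock_ops sketch; whether `SockOpsContext` exposes the raw pointer as a public `ops` field is an assumption here:

use aya_bpf::{macros::{map, sock_ops}, maps::SockMap, programs::SockOpsContext};

#[map]
static SOCK_MAP: SockMap = SockMap::with_max_entries(64, 0);

#[sock_ops]
pub fn track(ctx: SockOpsContext) -> u32 {
    // Store the current socket at index 0; a real program would pick the
    // index from connection state.
    unsafe {
        let _ = SOCK_MAP.update(0, ctx.ops, 0);
    }
    0
}
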
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_STACK_TRACE},
@@ -9,15 +9,17 @@ use crate::{
 #[repr(transparent)]
 pub struct StackTrace {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for StackTrace {}
+
 const PERF_MAX_STACK_DEPTH: u32 = 127;
 
 impl StackTrace {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_STACK_TRACE,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@@ -25,13 +27,13 @@ impl StackTrace {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_STACK_TRACE,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@@ -39,12 +41,12 @@ impl StackTrace {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
-    pub unsafe fn get_stackid<C: BpfContext>(&mut self, ctx: &C, flags: u64) -> Result<i64, i64> {
-        let ret = bpf_get_stackid(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, flags);
+    pub unsafe fn get_stackid<C: BpfContext>(&self, ctx: &C, flags: u64) -> Result<i64, i64> {
+        let ret = bpf_get_stackid(ctx.as_ptr(), self.def.get() as *mut _, flags);
         if ret < 0 {
             Err(ret)
         } else {

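`StackTrace::get_stackid` remains unsafe but now borrows the map immutably. A kprobe sketch (probe target and flags illustrative):

use aya_bpf::{macros::{kprobe, map}, maps::StackTrace, programs::ProbeContext};

#[map]
static STACKS: StackTrace = StackTrace::with_max_entries(1024, 0);

#[kprobe]
pub fn sample(ctx: ProbeContext) -> u32 {
    // The returned stack id indexes this map when read from user space.
    let _id = unsafe { STACKS.get_stackid(&ctx, 0) };
    0
}
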
@@ -14,10 +14,10 @@ use aya_bpf::{
 };
 
 #[map]
-static mut FOO: Array<u32> = Array::<u32>::with_max_entries(10, 0);
+static FOO: Array<u32> = Array::<u32>::with_max_entries(10, 0);
 
 #[map(name = "BAR")]
-static mut BAZ: Array<u32> = Array::<u32>::with_max_entries(10, 0);
+static BAZ: Array<u32> = Array::<u32>::with_max_entries(10, 0);
 
 #[xdp]
 pub fn pass(ctx: XdpContext) -> u32 {
@@ -34,4 +34,4 @@ unsafe fn try_pass(_ctx: XdpContext) -> Result<u32, u32> {
 #[panic_handler]
 fn panic(_info: &core::panic::PanicInfo) -> ! {
     unsafe { core::hint::unreachable_unchecked() }
 }
