bpf: Change map types to use &self, remove HashMap::get_mut

The bpf_map_defs are now wrapped with UnsafeCell, which also happens to
provide a cozy way to get a *mut pointer. An UnsafeCell isn't strictly
required, as the struct fields are practically opaque to us, but using an
UnsafeCell follows general best practice for guarding against miscompilation
and also prevents some obvious errors.

HashMap::get_mut was removed because the idea is completely unsound: map
values can be modified concurrently by the kernel, by other programs and by
user space, so handing out a &mut V can never uphold Rust's aliasing rules.
Previous users should wrap their data in UnsafeCell instead, or use atomics,
as sketched below.

Closes: https://github.com/aya-rs/aya/issues/233
pull/290/head
Tatsuyuki Ishi 2 years ago
parent ef3b029d26
commit 41c6b56142

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem, ptr::NonNull};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_cty::c_void;
@ -10,14 +10,16 @@ use crate::{
#[repr(transparent)]
pub struct Array<T> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_t: PhantomData<T>,
}
unsafe impl<T: Sync> Sync for Array<T> {}
impl<T> Array<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
Array {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<T>() as u32,
@ -25,14 +27,14 @@ impl<T> Array<T> {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
_t: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
Array {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<T>() as u32,
@ -40,15 +42,15 @@ impl<T> Array<T> {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
_t: PhantomData,
}
}
pub fn get(&mut self, index: u32) -> Option<&T> {
pub fn get(&self, index: u32) -> Option<&T> {
unsafe {
let value = bpf_map_lookup_elem(
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
&index as *const _ as *const c_void,
);
// FIXME: alignment
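
With `get` taking `&self`, an `Array` can now be declared as a plain `static` and read without touching a mutable static. A hedged sketch (the `#[map]` attribute usage and the config layout are illustrative, not part of this diff):

```rust
use aya_bpf::{macros::map, maps::Array};

// Hypothetical read-only configuration written by user space.
#[map]
static CONFIG: Array<u64> = Array::with_max_entries(4, 0);

fn sampling_rate() -> u64 {
    // get(&self, index) returns Option<&T>; fall back to 1 if user space
    // hasn't populated the slot yet.
    CONFIG.get(0).copied().unwrap_or(1)
}
```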

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem, ptr::NonNull};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_bindings::bindings::bpf_map_type::{
BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@ -13,15 +13,22 @@ use crate::{
#[repr(transparent)]
pub struct HashMap<K, V> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
impl<K, V> HashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
HashMap {
def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::None),
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
@ -29,44 +36,73 @@ impl<K, V> HashMap<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
HashMap {
def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::ByName),
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associated with `key` from the map.
/// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
/// make any guarantee about the atomicity of `insert` or `remove`, and any element removed from
/// the map might get aliased by another element in the map, causing garbage to be read, or
/// corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: &K) -> Option<&V> {
get(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get(&mut self, key: &K) -> Option<&V> {
get(&mut self.def, key)
pub fn get_ptr(&self, key: &K) -> Option<*const V> {
get_ptr(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, and additionally care should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
get_mut(&mut self.def, key)
pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key)
}
#[inline]
pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(&mut self.def, key, value, flags)
pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(self.def.get(), key, value, flags)
}
#[inline]
pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
remove(&mut self.def, key)
pub fn remove(&self, key: &K) -> Result<(), c_long> {
remove(self.def.get(), key)
}
}
#[repr(transparent)]
pub struct LruHashMap<K, V> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
impl<K, V> LruHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
LruHashMap {
def: build_def::<K, V>(BPF_MAP_TYPE_LRU_HASH, max_entries, flags, PinningType::None),
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
@ -74,54 +110,73 @@ impl<K, V> LruHashMap<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
LruHashMap {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
flags,
PinningType::ByName,
),
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associated with `key` from the map.
/// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
/// make any guarantee about the atomicity of `insert` or `remove`, and any element removed from
/// the map might get aliased by another element in the map, causing garbage to be read, or
/// corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: &K) -> Option<&V> {
get(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get(&mut self, key: &K) -> Option<&V> {
get(&mut self.def, key)
pub fn get_ptr(&self, key: &K) -> Option<*const V> {
get_ptr(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, and additionally care should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
get_mut(&mut self.def, key)
pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key)
}
#[inline]
pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(&mut self.def, key, value, flags)
pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(self.def.get(), key, value, flags)
}
#[inline]
pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
remove(&mut self.def, key)
pub fn remove(&self, key: &K) -> Result<(), c_long> {
remove(self.def.get(), key)
}
}
#[repr(transparent)]
pub struct PerCpuHashMap<K, V> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
impl<K, V> PerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
PerCpuHashMap {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
flags,
PinningType::None,
),
)),
_k: PhantomData,
_v: PhantomData,
}
@ -129,54 +184,73 @@ impl<K, V> PerCpuHashMap<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
PerCpuHashMap {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
flags,
PinningType::ByName,
),
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associated with `key` from the map.
/// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
/// make any guarantee about the atomicity of `insert` or `remove`, and any element removed from
/// the map might get aliased by another element in the map, causing garbage to be read, or
/// corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: &K) -> Option<&V> {
get(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get(&mut self, key: &K) -> Option<&V> {
get(&mut self.def, key)
pub fn get_ptr(&self, key: &K) -> Option<*const V> {
get_ptr(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, and additionally care should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
get_mut(&mut self.def, key)
pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key)
}
#[inline]
pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(&mut self.def, key, value, flags)
pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(self.def.get(), key, value, flags)
}
#[inline]
pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
remove(&mut self.def, key)
pub fn remove(&self, key: &K) -> Result<(), c_long> {
remove(self.def.get(), key)
}
}
#[repr(transparent)]
pub struct LruPerCpuHashMap<K, V> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
impl<K, V> LruPerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
LruPerCpuHashMap {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
flags,
PinningType::None,
),
)),
_k: PhantomData,
_v: PhantomData,
}
@ -184,35 +258,52 @@ impl<K, V> LruPerCpuHashMap<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
LruPerCpuHashMap {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
flags,
PinningType::ByName,
),
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associated with `key` from the map.
/// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
/// make any guarantee about the atomicity of `insert` or `remove`, and any element removed from
/// the map might get aliased by another element in the map, causing garbage to be read, or
/// corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: &K) -> Option<&V> {
get(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get(&mut self, key: &K) -> Option<&V> {
get(&mut self.def, key)
pub fn get_ptr(&self, key: &K) -> Option<*const V> {
get_ptr(self.def.get(), key)
}
/// Retrieve the value associated with `key` from the map.
/// The same caveat as `get` applies, and additionally care should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
get_mut(&mut self.def, key)
pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key)
}
#[inline]
pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(&mut self.def, key, value, flags)
pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
insert(self.def.get(), key, value, flags)
}
#[inline]
pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
remove(&mut self.def, key)
pub fn remove(&self, key: &K) -> Result<(), c_long> {
remove(self.def.get(), key)
}
}
@ -229,28 +320,29 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
}
#[inline]
fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
unsafe {
let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
// FIXME: alignment
NonNull::new(value as *mut V).map(|p| p.as_ref())
NonNull::new(value as *mut V).map(|p| p.as_ptr())
}
}
#[inline]
fn get_mut<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a mut V> {
unsafe {
let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
// FIXME: alignment
NonNull::new(value as *mut V).map(|mut p| p.as_mut())
}
fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
get_ptr_mut(def, key).map(|p| p as *const V)
}
#[inline]
unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
get_ptr(def, key).map(|p| &*p)
}
#[inline]
fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
let ret = unsafe {
bpf_map_update_elem(
def as *mut _ as *mut _,
def as *mut _,
key as *const _ as *const _,
value as *const _ as *const _,
flags,
@ -260,8 +352,7 @@ fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result
}
#[inline]
fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
let ret =
unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
(ret >= 0).then(|| ()).ok_or(ret)
}
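
Putting the new hash map surface together: `get` is `unsafe`, `get_ptr`/`get_ptr_mut` replace `get_mut`, and everything takes `&self`. A short usage sketch (map name and key/value types are illustrative):

```rust
use aya_bpf::{macros::map, maps::HashMap};

#[map]
static LAST_SEEN: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

fn record(pid: u32, now_ns: u64) {
    // insert/remove now borrow the map immutably and report errors as c_long.
    let _ = LAST_SEEN.insert(&pid, &now_ns, 0);
}

fn lookup(pid: u32) -> Option<u64> {
    // Unsafe: without BPF_F_NO_PREALLOC the kernel may recycle the entry
    // concurrently, so the returned reference can observe garbage.
    unsafe { LAST_SEEN.get(&pid).copied() }
}

fn forget(pid: u32) {
    let _ = LAST_SEEN.remove(&pid);
}
```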

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem, ptr::NonNull};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_cty::{c_long, c_void};
@ -10,11 +10,13 @@ use crate::{
#[repr(transparent)]
pub struct LpmTrie<K, V> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K: Sync, V: Sync> Sync for LpmTrie<K, V> {}
#[repr(packed)]
pub struct Key<K> {
/// Represents the number of bytes matched against.
@ -32,7 +34,12 @@ impl<K> Key<K> {
impl<K, V> LpmTrie<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
LpmTrie {
def: build_def::<K, V>(BPF_MAP_TYPE_LPM_TRIE, max_entries, flags, PinningType::None),
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LPM_TRIE,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
@ -40,34 +47,32 @@ impl<K, V> LpmTrie<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
LpmTrie {
def: build_def::<K, V>(
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LPM_TRIE,
max_entries,
flags,
PinningType::ByName,
),
)),
_k: PhantomData,
_v: PhantomData,
}
}
#[inline]
pub fn get(&mut self, key: &Key<K>) -> Option<&V> {
pub fn get(&self, key: &Key<K>) -> Option<&V> {
unsafe {
let value = bpf_map_lookup_elem(
&mut self.def as *mut _ as *mut _,
key as *const _ as *const c_void,
);
let value =
bpf_map_lookup_elem(self.def.get() as *mut _, key as *const _ as *const c_void);
// FIXME: alignment
NonNull::new(value as *mut V).map(|p| p.as_ref())
}
}
#[inline]
pub fn insert(&mut self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
pub fn insert(&self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
let ret = unsafe {
bpf_map_update_elem(
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
key as *const _ as *const _,
value as *const _ as *const _,
flags,
@ -77,12 +82,9 @@ impl<K, V> LpmTrie<K, V> {
}
#[inline]
pub fn remove(&mut self, key: &Key<K>) -> Result<(), c_long> {
pub fn remove(&self, key: &Key<K>) -> Result<(), c_long> {
let ret = unsafe {
bpf_map_delete_elem(
&mut self.def as *mut _ as *mut _,
key as *const _ as *const c_void,
)
bpf_map_delete_elem(self.def.get() as *mut _, key as *const _ as *const c_void)
};
(ret >= 0).then(|| ()).ok_or(ret)
}
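
A sketch of a longest-prefix lookup with the `&self` API. The `Key` import path, the `Key::new(prefix_len, data)` constructor and the `BPF_F_NO_PREALLOC` binding path are assumptions (the kernel requires that flag for LPM tries); the map contents are illustrative:

```rust
use aya_bpf::{
    bindings::BPF_F_NO_PREALLOC,
    macros::map,
    maps::lpm_trie::{Key, LpmTrie},
};

// Hypothetical allow-list of IPv4 prefixes, keys in network byte order.
#[map]
static ALLOWED: LpmTrie<u32, u8> = LpmTrie::with_max_entries(128, BPF_F_NO_PREALLOC);

fn is_allowed(addr_be: u32) -> bool {
    // Prefix length 32 asks for the longest match against the full address.
    let key = Key::new(32, addr_be);
    ALLOWED.get(&key).is_some()
}
```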

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem, ptr::NonNull};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_cty::c_void;
@ -10,14 +10,16 @@ use crate::{
#[repr(transparent)]
pub struct PerCpuArray<T> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_t: PhantomData<T>,
}
unsafe impl<T: Sync> Sync for PerCpuArray<T> {}
impl<T> PerCpuArray<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
PerCpuArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERCPU_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<T>() as u32,
@ -25,14 +27,14 @@ impl<T> PerCpuArray<T> {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
_t: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
PerCpuArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERCPU_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<T>() as u32,
@ -40,13 +42,13 @@ impl<T> PerCpuArray<T> {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
_t: PhantomData,
}
}
#[inline(always)]
pub fn get(&mut self, index: u32) -> Option<&T> {
pub fn get(&self, index: u32) -> Option<&T> {
unsafe {
// FIXME: alignment
self.lookup(index).map(|p| p.as_ref())
@ -54,7 +56,7 @@ impl<T> PerCpuArray<T> {
}
#[inline(always)]
pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
pub fn get_mut(&self, index: u32) -> Option<&mut T> {
unsafe {
// FIXME: alignment
self.lookup(index).map(|mut p| p.as_mut())
@ -62,9 +64,9 @@ impl<T> PerCpuArray<T> {
}
#[inline(always)]
unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
unsafe fn lookup(&self, index: u32) -> Option<NonNull<T>> {
let ptr = bpf_map_lookup_elem(
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
&index as *const _ as *const c_void,
);
NonNull::new(ptr as *mut T)
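
Because every CPU gets its own copy, `PerCpuArray::get_mut` keeps handing out `&mut T` even though it now takes `&self`. A sketch of the usual scratch-buffer pattern (struct layout and names are illustrative):

```rust
use aya_bpf::{macros::map, maps::PerCpuArray};

#[repr(C)]
pub struct Scratch {
    pub buf: [u8; 256],
}

// One Scratch per CPU, used to build data larger than the BPF stack allows.
#[map]
static SCRATCH: PerCpuArray<Scratch> = PerCpuArray::with_max_entries(1, 0);

fn with_scratch() -> Option<()> {
    // The slot belongs to the current CPU, so other CPUs never alias it.
    let scratch = SCRATCH.get_mut(0)?;
    scratch.buf[0] = 0xAB;
    Some(())
}
```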

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem};
use core::{cell::UnsafeCell, marker::PhantomData, mem};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@ -9,10 +9,12 @@ use crate::{
#[repr(transparent)]
pub struct PerfEventArray<T> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_t: PhantomData<T>,
}
unsafe impl<T: Sync> Sync for PerfEventArray<T> {}
impl<T> PerfEventArray<T> {
pub const fn new(flags: u32) -> PerfEventArray<T> {
PerfEventArray::with_max_entries(0, flags)
@ -20,7 +22,7 @@ impl<T> PerfEventArray<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventArray<T> {
PerfEventArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -28,14 +30,14 @@ impl<T> PerfEventArray<T> {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
_t: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventArray<T> {
PerfEventArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -43,21 +45,21 @@ impl<T> PerfEventArray<T> {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
_t: PhantomData,
}
}
pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &T, flags: u32) {
pub fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u32) {
self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
}
pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &T, flags: u32) {
pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) {
let flags = (flags as u64) << 32 | index as u64;
unsafe {
bpf_perf_event_output(
ctx.as_ptr(),
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
flags,
data as *const _ as *mut _,
mem::size_of::<T>() as u64,
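
`output` also moves to `&self`, so the event array can be an ordinary `static`. A sketch of emitting a fixed-size event; the `ProbeContext` scaffolding and the event layout are assumptions, not part of this diff:

```rust
use aya_bpf::{macros::map, maps::PerfEventArray, programs::ProbeContext};

#[repr(C)]
pub struct Event {
    pub pid: u32,
    pub len: u32,
}

#[map]
static EVENTS: PerfEventArray<Event> = PerfEventArray::new(0);

fn emit(ctx: &ProbeContext, event: &Event) {
    // Writes the event to the perf buffer of the current CPU.
    EVENTS.output(ctx, event, 0);
}
```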

@ -1,4 +1,4 @@
use core::mem;
use core::{cell::UnsafeCell, mem};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@ -9,9 +9,11 @@ use crate::{
#[repr(transparent)]
pub struct PerfEventByteArray {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for PerfEventByteArray {}
impl PerfEventByteArray {
pub const fn new(flags: u32) -> PerfEventByteArray {
PerfEventByteArray::with_max_entries(0, flags)
@ -19,7 +21,7 @@ impl PerfEventByteArray {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
PerfEventByteArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -27,13 +29,13 @@ impl PerfEventByteArray {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
PerfEventByteArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -41,20 +43,20 @@ impl PerfEventByteArray {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
}
}
pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &[u8], flags: u32) {
pub fn output<C: BpfContext>(&self, ctx: &C, data: &[u8], flags: u32) {
self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
}
pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &[u8], flags: u32) {
pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &[u8], flags: u32) {
let flags = (flags as u64) << 32 | index as u64;
unsafe {
bpf_perf_event_output(
ctx.as_ptr(),
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
flags,
data.as_ptr() as *mut _,
data.len() as u64,

@ -1,4 +1,4 @@
use core::{hint::unreachable_unchecked, mem};
use core::{cell::UnsafeCell, hint::unreachable_unchecked, mem};
use aya_bpf_cty::c_long;
@ -19,7 +19,7 @@ use crate::{
/// # use aya_bpf::{programs::LsmContext};
///
/// #[map]
/// static mut JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
/// static JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
///
/// # unsafe fn try_test(ctx: &LsmContext) -> Result<(), c_long> {
/// let index: u32 = 13;
@ -33,13 +33,15 @@ use crate::{
/// ```
#[repr(transparent)]
pub struct ProgramArray {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for ProgramArray {}
impl ProgramArray {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
ProgramArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PROG_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -47,13 +49,13 @@ impl ProgramArray {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
ProgramArray {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PROG_ARRAY,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -61,7 +63,7 @@ impl ProgramArray {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
}
}
@ -78,8 +80,8 @@ impl ProgramArray {
///
/// On success, this function **does not return** into the original program.
/// On failure, a negative error is returned, wrapped in `Err()`.
pub unsafe fn tail_call<C: BpfContext>(&mut self, ctx: &C, index: u32) -> Result<!, c_long> {
let res = bpf_tail_call(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, index);
pub unsafe fn tail_call<C: BpfContext>(&self, ctx: &C, index: u32) -> Result<!, c_long> {
let res = bpf_tail_call(ctx.as_ptr(), self.def.get() as *mut _, index);
if res != 0 {
Err(res)
} else {

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem};
use core::{cell::UnsafeCell, marker::PhantomData, mem};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_QUEUE},
@ -8,14 +8,16 @@ use crate::{
#[repr(transparent)]
pub struct Queue<T> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_t: PhantomData<T>,
}
unsafe impl<T: Sync> Sync for Queue<T> {}
impl<T> Queue<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
Queue {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_QUEUE,
key_size: 0,
value_size: mem::size_of::<T>() as u32,
@ -23,14 +25,14 @@ impl<T> Queue<T> {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
_t: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
Queue {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_QUEUE,
key_size: 0,
value_size: mem::size_of::<T>() as u32,
@ -38,15 +40,15 @@ impl<T> Queue<T> {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
_t: PhantomData,
}
}
pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_map_push_elem(
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
value as *const _ as *const _,
flags,
)
@ -54,13 +56,10 @@ impl<T> Queue<T> {
(ret >= 0).then(|| ()).ok_or(ret)
}
pub fn pop(&mut self) -> Option<T> {
pub fn pop(&self) -> Option<T> {
unsafe {
let mut value = mem::MaybeUninit::uninit();
let ret = bpf_map_pop_elem(
&mut self.def as *mut _ as *mut _,
value.as_mut_ptr() as *mut _,
);
let ret = bpf_map_pop_elem(self.def.get() as *mut _, value.as_mut_ptr() as *mut _);
(ret >= 0).then(|| value.assume_init())
}
}
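
`push` and `pop` likewise take `&self` now. A small sketch (element type and names are illustrative):

```rust
use aya_bpf::{macros::map, maps::Queue};

// Hypothetical FIFO of cookies drained from user space.
#[map]
static PENDING: Queue<u64> = Queue::with_max_entries(1024, 0);

fn enqueue(cookie: u64) {
    // flags = 0: fail when the queue is full (BPF_EXIST would overwrite the
    // oldest entry instead).
    let _ = PENDING.push(&cookie, 0);
}

fn dequeue() -> Option<u64> {
    PENDING.pop()
}
```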

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem};
use core::{cell::UnsafeCell, marker::PhantomData, mem};
use aya_bpf_cty::c_void;
@ -15,14 +15,16 @@ use crate::{
#[repr(transparent)]
pub struct SockHash<K> {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
}
unsafe impl<K: Sync> Sync for SockHash<K> {}
impl<K> SockHash<K> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
SockHash {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKHASH,
key_size: mem::size_of::<K>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -30,14 +32,14 @@ impl<K> SockHash<K> {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
_k: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
SockHash {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKHASH,
key_size: mem::size_of::<K>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -45,21 +47,16 @@ impl<K> SockHash<K> {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
_k: PhantomData,
}
}
pub fn update(
&mut self,
key: &mut K,
sk_ops: &mut bpf_sock_ops,
flags: u64,
) -> Result<(), i64> {
pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_sock_hash_update(
sk_ops as *mut _,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut c_void,
flags,
)
@ -67,22 +64,22 @@ impl<K> SockHash<K> {
(ret >= 0).then(|| ()).ok_or(ret)
}
pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
unsafe {
bpf_msg_redirect_hash(
ctx.as_ptr() as *mut _,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut _,
flags,
)
}
}
pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
unsafe {
bpf_sk_redirect_hash(
ctx.as_ptr() as *mut _,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut _,
flags,
)

@ -1,4 +1,4 @@
use core::mem;
use core::{cell::UnsafeCell, mem};
use aya_bpf_cty::c_void;
@ -15,13 +15,15 @@ use crate::{
#[repr(transparent)]
pub struct SockMap {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for SockMap {}
impl SockMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
SockMap {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -29,13 +31,13 @@ impl SockMap {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
SockMap {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
@ -43,19 +45,19 @@ impl SockMap {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
}
}
pub unsafe fn update(
&mut self,
&self,
mut index: u32,
sk_ops: *mut bpf_sock_ops,
flags: u64,
) -> Result<(), i64> {
let ret = bpf_sock_map_update(
sk_ops,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
&mut index as *mut _ as *mut c_void,
flags,
);
@ -66,19 +68,19 @@ impl SockMap {
}
}
pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
pub unsafe fn redirect_msg(&self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
bpf_msg_redirect_map(
ctx.as_ptr() as *mut _,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
index,
flags,
)
}
pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
pub unsafe fn redirect_skb(&self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
bpf_sk_redirect_map(
ctx.as_ptr() as *mut _,
&mut self.def as *mut _ as *mut _,
self.def.get() as *mut _,
index,
flags,
)
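
A sketch of using the `&self` `SockMap` from an sk_msg program; `SkMsgContext` appears in the diff above, but its `aya_bpf::programs` import path and the program wiring around this helper are assumptions:

```rust
use aya_bpf::{macros::map, maps::SockMap, programs::SkMsgContext};

#[map]
static SOCKS: SockMap = SockMap::with_max_entries(64, 0);

fn redirect(ctx: &SkMsgContext, index: u32) -> i64 {
    // redirect_msg is unsafe; the returned value is the sk_msg verdict
    // (SK_PASS on success, SK_DROP otherwise).
    unsafe { SOCKS.redirect_msg(ctx, index, 0) }
}
```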

@ -1,4 +1,4 @@
use core::mem;
use core::{cell::UnsafeCell, mem};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_STACK_TRACE},
@ -9,15 +9,17 @@ use crate::{
#[repr(transparent)]
pub struct StackTrace {
def: bpf_map_def,
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for StackTrace {}
const PERF_MAX_STACK_DEPTH: u32 = 127;
impl StackTrace {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
StackTrace {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_STACK_TRACE,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@ -25,13 +27,13 @@ impl StackTrace {
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
},
}),
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
StackTrace {
def: bpf_map_def {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_STACK_TRACE,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@ -39,12 +41,12 @@ impl StackTrace {
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
},
}),
}
}
pub unsafe fn get_stackid<C: BpfContext>(&mut self, ctx: &C, flags: u64) -> Result<i64, i64> {
let ret = bpf_get_stackid(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, flags);
pub unsafe fn get_stackid<C: BpfContext>(&self, ctx: &C, flags: u64) -> Result<i64, i64> {
let ret = bpf_get_stackid(ctx.as_ptr(), self.def.get() as *mut _, flags);
if ret < 0 {
Err(ret)
} else {
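
Finally, `get_stackid` now borrows the map immutably. A sketch of capturing a user-space stack id from a probe; `ProbeContext` and the `BPF_F_USER_STACK` binding path are assumptions:

```rust
use aya_bpf::{
    bindings::BPF_F_USER_STACK,
    macros::map,
    maps::StackTrace,
    programs::ProbeContext,
};

#[map]
static STACKS: StackTrace = StackTrace::with_max_entries(1024, 0);

fn capture_user_stack(ctx: &ProbeContext) -> Option<i64> {
    // The returned id keys the frames stored in the map; user space can
    // read them back out using that id.
    unsafe { STACKS.get_stackid(ctx, BPF_F_USER_STACK as u64).ok() }
}
```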
