Merge pull request #132 from eero-thia/thia/safe_map

aya-bpf: remove unnecessary unsafe markers on map functions.
Alessandro Decina, 3 years ago (committed by GitHub)
commit 32dc8a0d97

@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
@@ -45,16 +45,14 @@ impl<T> Array<T> {
         }
     }
 
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
+            let value = bpf_map_lookup_elem(
+                &mut self.def as *mut _ as *mut _,
+                &index as *const _ as *const c_void,
+            );
             // FIXME: alignment
-            Some(&*(value as *const T))
+            NonNull::new(value as *mut T).map(|p| p.as_ref())
         }
     }
 }
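For context, a minimal call-site sketch (hypothetical map and function names, assuming the aya-bpf `map` attribute macro and the `with_max_entries(max_entries, flags)` constructor): after this change the lookup itself is a safe call, and the only `unsafe` left at the call site is the access to the `static mut` map.

use aya_bpf::{macros::map, maps::Array};

#[map] // hypothetical map, for illustration only
static mut RULES: Array<u32> = Array::with_max_entries(64, 0);

fn lookup_rule(index: u32) -> Option<u32> {
    // SAFETY: only the `static mut` access is unsafe now; Array::get is safe.
    unsafe { RULES.get(index) }.copied()
}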

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem}; use core::{marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_bindings::bindings::bpf_map_type::{ use aya_bpf_bindings::bindings::bpf_map_type::{
BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -36,17 +36,17 @@ impl<K, V> HashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
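The same change from the caller's side, as a sketch (hypothetical map and function names; `c_long` is the error type the diff uses): helper failures now surface as ordinary `Err` values from safe methods.

use aya_bpf::{macros::map, maps::HashMap};
use aya_bpf_cty::c_long;

#[map] // hypothetical map, for illustration only
static mut STARTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

fn record(pid: u32, ts: u64) -> Result<(), c_long> {
    unsafe {
        // SAFETY: only the `static mut` access is unsafe; the map methods are safe.
        STARTS.insert(&pid, &ts, 0)?;
        if STARTS.get(&pid).is_some() {
            STARTS.remove(&pid)?;
        }
    }
    Ok(())
}

The LruHashMap, PerCpuHashMap, and LruPerCpuHashMap hunks below apply the identical signature change, so the same call-site shape holds for all four hash map types.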
@@ -81,17 +81,17 @@ impl<K, V> LruHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -131,17 +131,17 @@ impl<K, V> PerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -181,17 +181,17 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -209,42 +209,30 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }
 
 #[inline]
-unsafe fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
-    let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value.is_null() {
-        None
-    } else {
+fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+    unsafe {
+        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        Some(&*(value as *const V))
+        NonNull::new(value as *mut V).map(|p| p.as_ref())
     }
 }
 
 #[inline]
-unsafe fn insert<K, V>(
-    def: &mut bpf_map_def,
-    key: &K,
-    value: &V,
-    flags: u64,
-) -> Result<(), c_long> {
-    let ret = bpf_map_update_elem(
-        def as *mut _ as *mut _,
-        key as *const _ as *const _,
-        value as *const _ as *const _,
-        flags,
-    );
-    if ret < 0 {
-        return Err(ret);
-    }
-    Ok(())
+fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    let ret = unsafe {
+        bpf_map_update_elem(
+            def as *mut _ as *mut _,
+            key as *const _ as *const _,
+            value as *const _ as *const _,
+            flags,
+        )
+    };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }
 
 #[inline]
-unsafe fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let value = bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value < 0 {
-        Err(value)
-    } else {
-        Ok(())
-    }
+fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret =
+        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }
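The status-check idiom introduced in insert and remove, shown in isolation (a sketch, not part of the diff): `bool::then` (stable since Rust 1.50) maps a non-negative return code to `Some(())`, and `ok_or` turns `None` back into the raw error code.

fn check(ret: i64) -> Result<(), i64> {
    // Equivalent to: if ret < 0 { Err(ret) } else { Ok(()) }
    (ret >= 0).then(|| ()).ok_or(ret)
}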

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem}; use core::{marker::PhantomData, mem, ptr::NonNull};
use aya_bpf_cty::c_void; use aya_bpf_cty::c_void;
@@ -46,30 +46,27 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
             // FIXME: alignment
-            Some(&*(value as *const T))
+            self.lookup(index).map(|p| p.as_ref())
         }
     }
 
     #[inline(always)]
-    pub unsafe fn get_mut(&mut self, index: u32) -> Option<&mut T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        unsafe {
             // FIXME: alignment
-            Some(&mut *(value as *mut T))
+            self.lookup(index).map(|mut p| p.as_mut())
         }
     }
+
+    #[inline(always)]
+    unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
+        let ptr = bpf_map_lookup_elem(
+            &mut self.def as *mut _ as *mut _,
+            &index as *const _ as *const c_void,
+        );
+        NonNull::new(ptr as *mut T)
+    }
 }
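Call-site sketch for the per-CPU array (hypothetical names): `get_mut` can now hand out `&mut T` from a safe method, with both accessors sharing the one remaining `unsafe fn lookup`.

use aya_bpf::{macros::map, maps::PerCpuArray};

#[map] // hypothetical per-CPU scratch buffer, for illustration only
static mut SCRATCH: PerCpuArray<[u8; 64]> = PerCpuArray::with_max_entries(1, 0);

fn clear_scratch() -> Option<()> {
    // SAFETY: only the `static mut` access is unsafe; get_mut is safe.
    let buf = unsafe { SCRATCH.get_mut(0) }?;
    buf[0] = 0;
    Some(())
}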

@@ -43,29 +43,25 @@ impl<T> Queue<T> {
         }
     }
 
-    pub unsafe fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
-        let ret = bpf_map_push_elem(
-            &mut self.def as *mut _ as *mut _,
-            value as *const _ as *const _,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+        let ret = unsafe {
+            bpf_map_push_elem(
+                &mut self.def as *mut _ as *mut _,
+                value as *const _ as *const _,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn pop(&mut self) -> Option<T> {
-        let mut value = mem::MaybeUninit::uninit();
-        let ret = bpf_map_pop_elem(
-            &mut self.def as *mut _ as *mut _,
-            &mut value as *mut _ as *mut _,
-        );
-        if ret < 0 {
-            None
-        } else {
-            Some(value.assume_init())
-        }
+    pub fn pop(&mut self) -> Option<T> {
+        unsafe {
+            let mut value = mem::MaybeUninit::uninit();
+            let ret = bpf_map_pop_elem(
+                &mut self.def as *mut _ as *mut _,
+                value.as_mut_ptr() as *mut _,
+            );
+            (ret >= 0).then(|| value.assume_init())
+        }
     }
 }
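Queue sketch (hypothetical names, assuming `Queue::with_max_entries` takes the same `(max_entries, flags)` pair as the other maps): `pop` still writes into a `MaybeUninit` buffer internally and only calls `assume_init` once the helper reports success, so the caller sees a plain `Option<T>`.

use aya_bpf::{macros::map, maps::Queue};

#[map] // hypothetical queue, for illustration only
static mut PENDING: Queue<u32> = Queue::with_max_entries(1024, 0);

fn requeue_one() -> Result<(), i64> {
    // SAFETY: only the `static mut` access is unsafe now.
    if let Some(v) = unsafe { PENDING.pop() } {
        unsafe { PENDING.push(&v, 0) }?;
    }
    Ok(())
}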

@@ -47,40 +47,42 @@ impl<K> SockHash<K> {
         }
     }
 
-    pub unsafe fn update(
+    pub fn update(
         &mut self,
         key: &mut K,
-        sk_ops: *mut bpf_sock_ops,
+        sk_ops: &mut bpf_sock_ops,
         flags: u64,
     ) -> Result<(), i64> {
-        let ret = bpf_sock_hash_update(
-            sk_ops,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut c_void,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+        let ret = unsafe {
+            bpf_sock_hash_update(
+                sk_ops as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut c_void,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
-        bpf_msg_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_msg_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
    }
 
-    pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
-        bpf_sk_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_sk_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
     }
 }
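The `update` signature change is the one API break in this hunk: taking `&mut bpf_sock_ops` instead of `*mut bpf_sock_ops` moves the pointer-validity obligation from every caller into the single cast inside the method. A minimal sketch of that pattern, using a stand-in type rather than the real bindings:

struct Ops; // stand-in for bpf_sock_ops

// Before: every caller had to uphold the raw pointer's validity.
unsafe fn update_raw(ops: *mut Ops) {
    let _ = ops;
}

// After: the reference is valid by construction; the cast to a raw pointer
// happens in exactly one place.
fn update_ref(ops: &mut Ops) {
    unsafe { update_raw(ops as *mut _) }
}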
