diff --git a/bpf/aya-bpf/src/maps/array.rs b/bpf/aya-bpf/src/maps/array.rs
index 349e7efe..73aa4d27 100644
--- a/bpf/aya-bpf/src/maps/array.rs
+++ b/bpf/aya-bpf/src/maps/array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -45,16 +45,14 @@ impl<T> Array<T> {
         }
     }
 
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
+            let value = bpf_map_lookup_elem(
+                &mut self.def as *mut _ as *mut _,
+                &index as *const _ as *const c_void,
+            );
             // FIXME: alignment
-            Some(&*(value as *const T))
+            NonNull::new(value as *mut T).map(|p| p.as_ref())
         }
     }
 }
diff --git a/bpf/aya-bpf/src/maps/hash_map.rs b/bpf/aya-bpf/src/maps/hash_map.rs
index 64c043bd..e77b4135 100644
--- a/bpf/aya-bpf/src/maps/hash_map.rs
+++ b/bpf/aya-bpf/src/maps/hash_map.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -36,17 +36,17 @@ impl<K, V> HashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -81,17 +81,17 @@ impl<K, V> LruHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -131,17 +131,17 @@ impl<K, V> PerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -181,17 +181,17 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -209,42 +209,30 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }
 
 #[inline]
-unsafe fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
-    let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value.is_null() {
-        None
-    } else {
+fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+    unsafe {
+        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        Some(&*(value as *const V))
+        NonNull::new(value as *mut V).map(|p| p.as_ref())
     }
 }
 
 #[inline]
-unsafe fn insert<K, V>(
-    def: &mut bpf_map_def,
-    key: &K,
-    value: &V,
-    flags: u64,
-) -> Result<(), c_long> {
-    let ret = bpf_map_update_elem(
-        def as *mut _ as *mut _,
-        key as *const _ as *const _,
-        value as *const _ as *const _,
-        flags,
-    );
-    if ret < 0 {
-        return Err(ret);
-    }
-
-    Ok(())
+fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    let ret = unsafe {
+        bpf_map_update_elem(
+            def as *mut _ as *mut _,
+            key as *const _ as *const _,
+            value as *const _ as *const _,
+            flags,
+        )
+    };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }
 
 #[inline]
-unsafe fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let value = bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value < 0 {
-        Err(value)
-    } else {
-        Ok(())
-    }
+fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret =
+        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }
diff --git a/bpf/aya-bpf/src/maps/per_cpu_array.rs b/bpf/aya-bpf/src/maps/per_cpu_array.rs
index 5019e93f..a33f1523 100644
--- a/bpf/aya-bpf/src/maps/per_cpu_array.rs
+++ b/bpf/aya-bpf/src/maps/per_cpu_array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -46,30 +46,27 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
+            // FIXME: alignment
+            self.lookup(index).map(|p| p.as_ref())
+        }
+    }
+
+    #[inline(always)]
+    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        unsafe {
             // FIXME: alignment
-            Some(&*(value as *const T))
+            self.lookup(index).map(|mut p| p.as_mut())
         }
     }
 
     #[inline(always)]
-    pub unsafe fn get_mut(&mut self, index: u32) -> Option<&mut T> {
-        let value = bpf_map_lookup_elem(
+    unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
+        let ptr = bpf_map_lookup_elem(
             &mut self.def as *mut _ as *mut _,
             &index as *const _ as *const c_void,
         );
-        if value.is_null() {
-            None
-        } else {
-            // FIXME: alignment
-            Some(&mut *(value as *mut T))
-        }
+        NonNull::new(ptr as *mut T)
     }
 }
diff --git a/bpf/aya-bpf/src/maps/queue.rs b/bpf/aya-bpf/src/maps/queue.rs
index 783c5bbd..663db494 100644
--- a/bpf/aya-bpf/src/maps/queue.rs
+++ b/bpf/aya-bpf/src/maps/queue.rs
@@ -43,29 +43,25 @@ impl<T> Queue<T> {
         }
     }
 
-    pub unsafe fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
-        let ret = bpf_map_push_elem(
-            &mut self.def as *mut _ as *mut _,
-            value as *const _ as *const _,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+        let ret = unsafe {
+            bpf_map_push_elem(
+                &mut self.def as *mut _ as *mut _,
+                value as *const _ as *const _,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn pop(&mut self) -> Option<T> {
-        let mut value = mem::MaybeUninit::uninit();
-        let ret = bpf_map_pop_elem(
-            &mut self.def as *mut _ as *mut _,
-            &mut value as *mut _ as *mut _,
-        );
-        if ret < 0 {
-            None
-        } else {
-            Some(value.assume_init())
+    pub fn pop(&mut self) -> Option<T> {
+        unsafe {
+            let mut value = mem::MaybeUninit::uninit();
+            let ret = bpf_map_pop_elem(
+                &mut self.def as *mut _ as *mut _,
+                value.as_mut_ptr() as *mut _,
+            );
+            (ret >= 0).then(|| value.assume_init())
         }
     }
 }
diff --git a/bpf/aya-bpf/src/maps/sock_hash.rs b/bpf/aya-bpf/src/maps/sock_hash.rs
index f99a9dc4..3148ee24 100644
--- a/bpf/aya-bpf/src/maps/sock_hash.rs
+++ b/bpf/aya-bpf/src/maps/sock_hash.rs
@@ -47,40 +47,42 @@ impl<K> SockHash<K> {
         }
     }
 
-    pub unsafe fn update(
+    pub fn update(
         &mut self,
         key: &mut K,
-        sk_ops: *mut bpf_sock_ops,
+        sk_ops: &mut bpf_sock_ops,
         flags: u64,
     ) -> Result<(), i64> {
-        let ret = bpf_sock_hash_update(
-            sk_ops,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut c_void,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+        let ret = unsafe {
+            bpf_sock_hash_update(
+                sk_ops as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut c_void,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
-        bpf_msg_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_msg_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
     }
 
-    pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
-        bpf_sk_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_sk_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
     }
 }