Merge pull request #132 from eero-thia/thia/safe_map

aya-bpf: remove unnecessary unsafe markers on map functions.
Alessandro Decina authored 3 years ago, committed by GitHub
commit 32dc8a0d97

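In short: the map accessors (get, insert, remove, push, pop, update, redirect_*) on Array, HashMap (all four flavors), PerCpuArray, Queue and SockHash are no longer marked unsafe fn. Each one wraps its helper call in an internal unsafe block, and null results from bpf_map_lookup_elem are handled with ptr::NonNull instead of a manual is_null() branch, so callers get an ordinary Option/Result API. A minimal before/after sketch of a call site (the counter map and function are hypothetical, and the map is taken by reference here to keep the static mut question out of the picture):

    use aya_bpf::maps::HashMap;

    // Before this change every accessor was an `unsafe fn`, so callers wrote
    //     unsafe { counters.get(&ip) }
    // With this change the same lookup is a safe call:
    fn count_packet(counters: &mut HashMap<u32, u64>, ip: u32) {
        let next = counters.get(&ip).map(|c| *c + 1).unwrap_or(1);
        // insert() can still fail (e.g. map full); the kernel error code
        // comes back as Err(c_long).
        let _ = counters.insert(&ip, &next, 0);
    }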
--- a/bpf/aya-bpf/src/maps/array.rs
+++ b/bpf/aya-bpf/src/maps/array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -45,16 +45,14 @@ impl<T> Array<T> {
         }
     }
 
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
+            let value = bpf_map_lookup_elem(
+                &mut self.def as *mut _ as *mut _,
+                &index as *const _ as *const c_void,
+            );
             // FIXME: alignment
-            Some(&*(value as *const T))
+            NonNull::new(value as *mut T).map(|p| p.as_ref())
         }
     }
 }

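With this hunk, a failed lookup on an Array simply yields None from safe code. A sketch of a call site under the new API (the function and fallback value are illustrative, not part of the patch):

    use aya_bpf::maps::Array;

    // get() returns None when the index has no entry, instead of asking the
    // caller to uphold pointer invariants in an unsafe block.
    fn read_limit(config: &mut Array<u32>) -> u32 {
        config.get(0).copied().unwrap_or(u32::MAX)
    }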
--- a/bpf/aya-bpf/src/maps/hash_map.rs
+++ b/bpf/aya-bpf/src/maps/hash_map.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -36,17 +36,17 @@ impl<K, V> HashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -81,17 +81,17 @@ impl<K, V> LruHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -131,17 +131,17 @@ impl<K, V> PerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
@@ -181,17 +181,17 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     }
 
     #[inline]
-    pub unsafe fn get(&mut self, key: &K) -> Option<&V> {
+    pub fn get(&mut self, key: &K) -> Option<&V> {
         get(&mut self.def, key)
     }
 
     #[inline]
-    pub unsafe fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
         insert(&mut self.def, key, value, flags)
     }
 
     #[inline]
-    pub unsafe fn remove(&mut self, key: &K) -> Result<(), c_long> {
+    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
         remove(&mut self.def, key)
     }
 }
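All four wrappers (HashMap, LruHashMap, PerCpuHashMap, LruPerCpuHashMap) forward to the shared get/insert/remove free functions below, so dropping the unsafe markers is mechanical across the board. The same safe surface applies to the per-CPU variants; for instance (key and value types chosen for illustration):

    use aya_bpf::maps::PerCpuHashMap;

    // get() on a per-CPU hash map returns this CPU's value slot, now
    // without an unsafe block at the call site.
    fn lookup(map: &mut PerCpuHashMap<u32, u64>, key: u32) -> Option<u64> {
        map.get(&key).copied()
    }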
@@ -209,42 +209,30 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }
 
 #[inline]
-unsafe fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
-    let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value.is_null() {
-        None
-    } else {
+fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+    unsafe {
+        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        Some(&*(value as *const V))
+        NonNull::new(value as *mut V).map(|p| p.as_ref())
     }
 }
 
 #[inline]
-unsafe fn insert<K, V>(
-    def: &mut bpf_map_def,
-    key: &K,
-    value: &V,
-    flags: u64,
-) -> Result<(), c_long> {
-    let ret = bpf_map_update_elem(
-        def as *mut _ as *mut _,
-        key as *const _ as *const _,
-        value as *const _ as *const _,
-        flags,
-    );
-    if ret < 0 {
-        return Err(ret);
-    }
-    Ok(())
+fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+    let ret = unsafe {
+        bpf_map_update_elem(
+            def as *mut _ as *mut _,
+            key as *const _ as *const _,
+            value as *const _ as *const _,
+            flags,
+        )
+    };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }
 
 #[inline]
-unsafe fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let value = bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-    if value < 0 {
-        Err(value)
-    } else {
-        Ok(())
-    }
+fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret =
+        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+    (ret >= 0).then(|| ()).ok_or(ret)
 }

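The new error handling leans on bool::then to collapse the old if/else return-code checks into a single expression. In isolation the idiom reads like this (a standalone illustration, not code from the patch):

    // bool::then maps a non-negative return code to Some(()), and ok_or
    // turns the None produced by a negative code into Err(ret).
    fn check(ret: i64) -> Result<(), i64> {
        (ret >= 0).then(|| ()).ok_or(ret)
    }
    // check(0)  == Ok(())
    // check(-1) == Err(-1)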
--- a/bpf/aya-bpf/src/maps/per_cpu_array.rs
+++ b/bpf/aya-bpf/src/maps/per_cpu_array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
@@ -46,30 +46,27 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    pub unsafe fn get(&mut self, index: u32) -> Option<&T> {
-        let value = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
-            &index as *const _ as *const c_void,
-        );
-        if value.is_null() {
-            None
-        } else {
+    pub fn get(&mut self, index: u32) -> Option<&T> {
+        unsafe {
             // FIXME: alignment
-            Some(&*(value as *const T))
+            self.lookup(index).map(|p| p.as_ref())
         }
     }
 
     #[inline(always)]
-    pub unsafe fn get_mut(&mut self, index: u32) -> Option<&mut T> {
-        let value = bpf_map_lookup_elem(
+    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+        unsafe {
+            // FIXME: alignment
+            self.lookup(index).map(|mut p| p.as_mut())
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
+        let ptr = bpf_map_lookup_elem(
             &mut self.def as *mut _ as *mut _,
             &index as *const _ as *const c_void,
         );
-        if value.is_null() {
-            None
-        } else {
-            // FIXME: alignment
-            Some(&mut *(value as *mut T))
-        }
+        NonNull::new(ptr as *mut T)
     }
 }

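Factoring both accessors through the private unsafe lookup() keeps the pointer handling in one place while get() and get_mut() become safe. A sketch of a call site (slot 0 as the counter index is an arbitrary choice):

    use aya_bpf::maps::PerCpuArray;

    // get_mut() hands back a mutable reference to this CPU's copy of the
    // element, so a per-CPU counter can be bumped without unsafe code.
    fn bump(counters: &mut PerCpuArray<u64>) {
        if let Some(count) = counters.get_mut(0) {
            *count += 1;
        }
    }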
--- a/bpf/aya-bpf/src/maps/queue.rs
+++ b/bpf/aya-bpf/src/maps/queue.rs
@@ -43,29 +43,25 @@ impl<T> Queue<T> {
         }
     }
 
-    pub unsafe fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
-        let ret = bpf_map_push_elem(
-            &mut self.def as *mut _ as *mut _,
-            value as *const _ as *const _,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+        let ret = unsafe {
+            bpf_map_push_elem(
+                &mut self.def as *mut _ as *mut _,
+                value as *const _ as *const _,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn pop(&mut self) -> Option<T> {
-        let mut value = mem::MaybeUninit::uninit();
-        let ret = bpf_map_pop_elem(
-            &mut self.def as *mut _ as *mut _,
-            &mut value as *mut _ as *mut _,
-        );
-        if ret < 0 {
-            None
-        } else {
-            Some(value.assume_init())
+    pub fn pop(&mut self) -> Option<T> {
+        unsafe {
+            let mut value = mem::MaybeUninit::uninit();
+            let ret = bpf_map_pop_elem(
+                &mut self.def as *mut _ as *mut _,
+                value.as_mut_ptr() as *mut _,
+            );
+            (ret >= 0).then(|| value.assume_init())
         }
    }
 }

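Note the small correctness cleanup hiding in pop(): the old code passed &mut value, a pointer to the MaybeUninit wrapper itself, where the new code uses value.as_mut_ptr(), the documented way to get a pointer to the wrapped T. The addresses coincide, but as_mut_ptr() is the type-correct form. A call site afterwards can be entirely safe (the element type is illustrative):

    use aya_bpf::maps::Queue;

    // pop() copies the element out through MaybeUninit internally and only
    // calls assume_init() after the helper reports success.
    fn drain_one(events: &mut Queue<u64>) -> Option<u64> {
        events.pop()
    }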
--- a/bpf/aya-bpf/src/maps/sock_hash.rs
+++ b/bpf/aya-bpf/src/maps/sock_hash.rs
@@ -47,40 +47,42 @@ impl<K> SockHash<K> {
         }
     }
 
-    pub unsafe fn update(
+    pub fn update(
         &mut self,
         key: &mut K,
-        sk_ops: *mut bpf_sock_ops,
+        sk_ops: &mut bpf_sock_ops,
         flags: u64,
     ) -> Result<(), i64> {
-        let ret = bpf_sock_hash_update(
-            sk_ops,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut c_void,
-            flags,
-        );
-        if ret < 0 {
-            Err(ret)
-        } else {
-            Ok(())
-        }
+        let ret = unsafe {
+            bpf_sock_hash_update(
+                sk_ops as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut c_void,
+                flags,
+            )
+        };
+        (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
-        bpf_msg_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_msg_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
     }
 
-    pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
-        bpf_sk_redirect_hash(
-            ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
-            key as *mut _ as *mut _,
-            flags,
-        )
+    pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
+        unsafe {
+            bpf_sk_redirect_hash(
+                ctx.as_ptr() as *mut _,
+                &mut self.def as *mut _ as *mut _,
+                key as *mut _ as *mut _,
+                flags,
+            )
+        }
+    }
 }

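Beyond dropping unsafe, update() now takes &mut bpf_sock_ops rather than a raw *mut bpf_sock_ops, so the borrow checker rather than the caller vouches for the context pointer. A sketch of a call site (the u32 key and the surrounding function are hypothetical, and the bindings import path is my reading of aya-bpf at this commit):

    use aya_bpf::{bindings::bpf_sock_ops, maps::SockHash};

    // Register a socket in the hash from a sock_ops program; redirect_msg /
    // redirect_skb can then steer matching traffic to it.
    fn register(socks: &mut SockHash<u32>, ops: &mut bpf_sock_ops, mut key: u32) -> Result<(), i64> {
        socks.update(&mut key, ops, 0)
    }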