From 41c6b56142f5637ca287ea35396a6463df87b0a1 Mon Sep 17 00:00:00 2001
From: Tatsuyuki Ishi
Date: Sun, 29 May 2022 12:13:50 +0900
Subject: [PATCH] bpf: Change map types to use &self, remove HashMap::get_mut

The bpf_map_defs are now wrapped in UnsafeCell, which also provides a
convenient way to obtain a *mut pointer. An UnsafeCell isn't strictly
required, since the struct fields are practically opaque to us, but
using one follows general best practice for avoiding miscompilation
and also prevents some obvious errors.

HashMap::get_mut was removed because it is fundamentally unsound: the
kernel and concurrently running programs can mutate a map value at any
time, so handing out a &mut reference to it violates Rust's aliasing
rules. Previous users should wrap their data in UnsafeCell instead, or
use atomics.
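As an illustration, a program that used to do the following (the
COUNTERS map and try_count function are hypothetical, not part of this
patch):

    #[map]
    static mut COUNTERS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

    fn try_count(key: u32) -> Result<(), c_long> {
        // Old API: get_mut required `static mut` and handed out `&mut u64`.
        if let Some(count) = unsafe { COUNTERS.get_mut(&key) } {
            *count += 1;
        }
        Ok(())
    }

can now drop the `static mut` and use get_ptr_mut, which keeps the
remaining unsafety explicit at the point of dereference:

    #[map]
    static COUNTERS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

    fn try_count(key: u32) -> Result<(), c_long> {
        if let Some(count) = COUNTERS.get_ptr_mut(&key) {
            // SAFETY: no other reference to this slot is held across this
            // write; see the aliasing caveat on get_ptr_mut.
            unsafe { *count += 1 };
        }
        Ok(())
    }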
Closes: https://github.com/aya-rs/aya/issues/233
---
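A sketch of the intended usage of the new unsafe `get` (illustrative,
not part of the patch; it assumes a map created with BPF_F_NO_PREALLOC,
per the safety comment on `get`, and a Copy value type — the LIMITS map
and Limits struct are made up for this example):

    use aya_bpf::{bindings::BPF_F_NO_PREALLOC, macros::map, maps::HashMap};

    #[derive(Clone, Copy)]
    #[repr(C)]
    struct Limits {
        rate: u64,
        burst: u64,
    }

    #[map]
    static LIMITS: HashMap<u32, Limits> =
        HashMap::with_max_entries(1024, BPF_F_NO_PREALLOC);

    fn lookup_limits(key: &u32) -> Option<Limits> {
        // SAFETY: the map is created with BPF_F_NO_PREALLOC, so a removed
        // element is RCU-freed rather than recycled in place, and the value
        // is copied out before the reference is dropped.
        unsafe { LIMITS.get(key) }.copied()
    }

Without BPF_F_NO_PREALLOC, prefer get_ptr/get_ptr_mut and decide at each
dereference whether it is safe.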
 bpf/aya-bpf/src/maps/array.rs                 |  18 +-
 bpf/aya-bpf/src/maps/hash_map.rs              | 219 +++++++++++++-----
 bpf/aya-bpf/src/maps/lpm_trie.rs              |  36 +--
 bpf/aya-bpf/src/maps/per_cpu_array.rs         |  22 +-
 bpf/aya-bpf/src/maps/perf/perf_event_array.rs |  20 +-
 .../src/maps/perf/perf_event_byte_array.rs    |  20 +-
 bpf/aya-bpf/src/maps/program_array.rs         |  20 +-
 bpf/aya-bpf/src/maps/queue.rs                 |  25 +-
 bpf/aya-bpf/src/maps/sock_hash.rs             |  31 ++-
 bpf/aya-bpf/src/maps/sock_map.rs              |  26 ++-
 bpf/aya-bpf/src/maps/stack_trace.rs           |  18 +-
 11 files changed, 279 insertions(+), 176 deletions(-)

diff --git a/bpf/aya-bpf/src/maps/array.rs b/bpf/aya-bpf/src/maps/array.rs
index 73aa4d27..53b596ca 100644
--- a/bpf/aya-bpf/src/maps/array.rs
+++ b/bpf/aya-bpf/src/maps/array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -10,14 +10,16 @@ use crate::{
 
 #[repr(transparent)]
 pub struct Array<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for Array<T> {}
+
 impl<T> Array<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
         Array {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -25,14 +27,14 @@ impl<T> Array<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
         Array {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -40,15 +42,15 @@ impl<T> Array<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn get(&mut self, index: u32) -> Option<&T> {
+    pub fn get(&self, index: u32) -> Option<&T> {
         unsafe {
             let value = bpf_map_lookup_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 &index as *const _ as *const c_void,
             );
             // FIXME: alignment
diff --git a/bpf/aya-bpf/src/maps/hash_map.rs b/bpf/aya-bpf/src/maps/hash_map.rs
index 90fb467e..13aa1aed 100644
--- a/bpf/aya-bpf/src/maps/hash_map.rs
+++ b/bpf/aya-bpf/src/maps/hash_map.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
@@ -13,15 +13,22 @@ use crate::{
 #[repr(transparent)]
 pub struct HashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
+
 impl<K, V> HashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
@@ -29,44 +36,73 @@ impl<K, V> HashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_HASH, max_entries, flags, PinningType::ByName),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_HASH,
+                max_entries,
+                flags,
+                PinningType::ByName,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
+
 impl<K, V> LruHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(BPF_MAP_TYPE_LRU_HASH, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_LRU_HASH,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
@@ -74,54 +110,73 @@ impl<K, V> LruHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct PerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for PerCpuHashMap<K, V> {}
+
 impl<K, V> PerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
@@ -129,54 +184,73 @@ impl<K, V> PerCpuHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get(), key)
     }
 }
 
 #[repr(transparent)]
 pub struct LruPerCpuHashMap<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LruPerCpuHashMap<K, V> {}
+
 impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::None,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
@@ -184,35 +258,52 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LRU_PERCPU_HASH,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get(), key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
-    pub fn get(&mut self, key: &K) -> Option<&V> {
-        get(&mut self.def, key)
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get(), key)
     }
 
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
     #[inline]
-    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
-        get_mut(&mut self.def, key)
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get(), key)
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(&mut self.def, key, value, flags)
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get(), key, value, flags)
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &K) -> Result<(), c_long> {
-        remove(&mut self.def, key)
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.def.get(), key)
     }
 }
@@ -229,28 +320,29 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }
 
 #[inline]
-fn get<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a V> {
+fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
     unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
+        let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
-        NonNull::new(value as *mut V).map(|p| p.as_ref())
+        NonNull::new(value as *mut V).map(|p| p.as_ptr())
     }
 }
 
 #[inline]
-fn get_mut<'a, K, V>(def: &mut bpf_map_def, key: &K) -> Option<&'a mut V> {
-    unsafe {
-        let value = bpf_map_lookup_elem(def as *mut _ as *mut _, key as *const _ as *const c_void);
-        // FIXME: alignment
-        NonNull::new(value as *mut V).map(|mut p| p.as_mut())
-    }
+fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
+    get_ptr_mut(def, key).map(|p| p as *const V)
+}
+
+#[inline]
+unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
+    get_ptr(def, key).map(|p| &*p)
 }
 
 #[inline]
-fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
     let ret = unsafe {
         bpf_map_update_elem(
-            def as *mut _ as *mut _,
+            def as *mut _,
             key as *const _ as *const _,
             value as *const _ as *const _,
             flags,
@@ -260,8 +352,7 @@ fn insert<K, V>(def: &mut bpf_map_def, key: &K, value: &V, flags: u64) -> Resul
 }
 
 #[inline]
-fn remove<K>(def: &mut bpf_map_def, key: &K) -> Result<(), c_long> {
-    let ret =
-        unsafe { bpf_map_delete_elem(def as *mut _ as *mut _, key as *const _ as *const c_void) };
+fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
+    let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
     (ret >= 0).then(|| ()).ok_or(ret)
 }
diff --git a/bpf/aya-bpf/src/maps/lpm_trie.rs b/bpf/aya-bpf/src/maps/lpm_trie.rs
index 41888f77..b0eeefb3 100644
--- a/bpf/aya-bpf/src/maps/lpm_trie.rs
+++ b/bpf/aya-bpf/src/maps/lpm_trie.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::{c_long, c_void};
 
@@ -10,11 +10,13 @@ use crate::{
 
 #[repr(transparent)]
 pub struct LpmTrie<K, V> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
     _v: PhantomData<V>,
 }
 
+unsafe impl<K: Sync, V: Sync> Sync for LpmTrie<K, V> {}
+
 #[repr(packed)]
 pub struct Key<K> {
     /// Represents the number of bytes matched against.
@@ -32,7 +34,12 @@ impl<K> Key<K> {
 impl<K, V> LpmTrie<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
         LpmTrie {
-            def: build_def::<K, V>(BPF_MAP_TYPE_LPM_TRIE, max_entries, flags, PinningType::None),
+            def: UnsafeCell::new(build_def::<K, V>(
+                BPF_MAP_TYPE_LPM_TRIE,
+                max_entries,
+                flags,
+                PinningType::None,
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
@@ -40,34 +47,32 @@ impl<K, V> LpmTrie<K, V> {
 
     pub const fn pinned(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
         LpmTrie {
-            def: build_def::<K, V>(
+            def: UnsafeCell::new(build_def::<K, V>(
                 BPF_MAP_TYPE_LPM_TRIE,
                 max_entries,
                 flags,
                 PinningType::ByName,
-            ),
+            )),
             _k: PhantomData,
             _v: PhantomData,
         }
     }
 
     #[inline]
-    pub fn get(&mut self, key: &Key<K>) -> Option<&V> {
+    pub fn get(&self, key: &Key<K>) -> Option<&V> {
         unsafe {
-            let value = bpf_map_lookup_elem(
-                &mut self.def as *mut _ as *mut _,
-                key as *const _ as *const c_void,
-            );
+            let value =
+                bpf_map_lookup_elem(self.def.get() as *mut _, key as *const _ as *const c_void);
             // FIXME: alignment
             NonNull::new(value as *mut V).map(|p| p.as_ref())
         }
     }
 
     #[inline]
-    pub fn insert(&mut self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
+    pub fn insert(&self, key: &Key<K>, value: &V, flags: u64) -> Result<(), c_long> {
         let ret = unsafe {
             bpf_map_update_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *const _ as *const _,
                 value as *const _ as *const _,
                 flags,
@@ -77,12 +82,9 @@ impl<K, V> LpmTrie<K, V> {
     }
 
     #[inline]
-    pub fn remove(&mut self, key: &Key<K>) -> Result<(), c_long> {
+    pub fn remove(&self, key: &Key<K>) -> Result<(), c_long> {
         let ret = unsafe {
-            bpf_map_delete_elem(
-                &mut self.def as *mut _ as *mut _,
-                key as *const _ as *const c_void,
-            )
+            bpf_map_delete_elem(self.def.get() as *mut _, key as *const _ as *const c_void)
         };
         (ret >= 0).then(|| ()).ok_or(ret)
     }
diff --git a/bpf/aya-bpf/src/maps/per_cpu_array.rs b/bpf/aya-bpf/src/maps/per_cpu_array.rs
index a33f1523..7c567b49 100644
--- a/bpf/aya-bpf/src/maps/per_cpu_array.rs
+++ b/bpf/aya-bpf/src/maps/per_cpu_array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
 
 use aya_bpf_cty::c_void;
 
@@ -10,14 +10,16 @@ use crate::{
 
 #[repr(transparent)]
 pub struct PerCpuArray<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for PerCpuArray<T> {}
+
 impl<T> PerCpuArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERCPU_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -25,14 +27,14 @@ impl<T> PerCpuArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERCPU_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
@@ -40,13 +42,13 @@ impl<T> PerCpuArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     #[inline(always)]
-    pub fn get(&mut self, index: u32) -> Option<&T> {
+    pub fn get(&self, index: u32) -> Option<&T> {
         unsafe {
             // FIXME: alignment
             self.lookup(index).map(|p| p.as_ref())
@@ -54,7 +56,7 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    pub fn get_mut(&mut self, index: u32) -> Option<&mut T> {
+    pub fn get_mut(&self, index: u32) -> Option<&mut T> {
         unsafe {
             // FIXME: alignment
             self.lookup(index).map(|mut p| p.as_mut())
@@ -62,9 +64,9 @@ impl<T> PerCpuArray<T> {
     }
 
     #[inline(always)]
-    unsafe fn lookup(&mut self, index: u32) -> Option<NonNull<T>> {
+    unsafe fn lookup(&self, index: u32) -> Option<NonNull<T>> {
         let ptr = bpf_map_lookup_elem(
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             &index as *const _ as *const c_void,
         );
         NonNull::new(ptr as *mut T)
diff --git a/bpf/aya-bpf/src/maps/perf/perf_event_array.rs b/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
index b102c871..c881a885 100644
--- a/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
+++ b/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@@ -9,10 +9,12 @@ use crate::{
 
 #[repr(transparent)]
 pub struct PerfEventArray<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for PerfEventArray<T> {}
+
 impl<T> PerfEventArray<T> {
     pub const fn new(flags: u32) -> PerfEventArray<T> {
         PerfEventArray::with_max_entries(0, flags)
@@ -20,7 +22,7 @@ impl<T> PerfEventArray<T> {
     }
 
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -28,14 +30,14 @@ impl<T> PerfEventArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -43,21 +45,21 @@ impl<T> PerfEventArray<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &T, flags: u32) {
+    pub fn output<C: BpfContext>(&self, ctx: &C, data: &T, flags: u32) {
         self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
     }
 
-    pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &T, flags: u32) {
+    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) {
         let flags = (flags as u64) << 32 | index as u64;
         unsafe {
             bpf_perf_event_output(
                 ctx.as_ptr(),
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 flags,
                 data as *const _ as *mut _,
                 mem::size_of::<T>() as u64,
diff --git a/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs b/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
index 28424891..46c3613f 100644
--- a/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
+++ b/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_F_CURRENT_CPU},
@@ -9,9 +9,11 @@ use crate::{
 
 #[repr(transparent)]
 pub struct PerfEventByteArray {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for PerfEventByteArray {}
+
 impl PerfEventByteArray {
     pub const fn new(flags: u32) -> PerfEventByteArray {
         PerfEventByteArray::with_max_entries(0, flags)
@@ -19,7 +21,7 @@ impl PerfEventByteArray {
     }
 
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -27,13 +29,13 @@ impl PerfEventByteArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -41,20 +43,20 @@ impl PerfEventByteArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
-    pub fn output<C: BpfContext>(&mut self, ctx: &C, data: &[u8], flags: u32) {
+    pub fn output<C: BpfContext>(&self, ctx: &C, data: &[u8], flags: u32) {
         self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
     }
 
-    pub fn output_at_index<C: BpfContext>(&mut self, ctx: &C, index: u32, data: &[u8], flags: u32) {
+    pub fn output_at_index<C: BpfContext>(&self, ctx: &C, index: u32, data: &[u8], flags: u32) {
         let flags = (flags as u64) << 32 | index as u64;
         unsafe {
             bpf_perf_event_output(
                 ctx.as_ptr(),
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 flags,
                 data.as_ptr() as *mut _,
                 data.len() as u64,
diff --git a/bpf/aya-bpf/src/maps/program_array.rs b/bpf/aya-bpf/src/maps/program_array.rs
index 06c91b36..62fa07d4 100644
--- a/bpf/aya-bpf/src/maps/program_array.rs
+++ b/bpf/aya-bpf/src/maps/program_array.rs
@@ -1,4 +1,4 @@
-use core::{hint::unreachable_unchecked, mem};
+use core::{cell::UnsafeCell, hint::unreachable_unchecked, mem};
 
 use aya_bpf_cty::c_long;
 
@@ -19,7 +19,7 @@ use crate::{
 /// # use aya_bpf::{programs::LsmContext};
 ///
 /// #[map]
-/// static mut JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
+/// static JUMP_TABLE: ProgramArray = ProgramArray::with_max_entries(16, 0);
 ///
 /// # unsafe fn try_test(ctx: &LsmContext) -> Result<(), c_long> {
 /// let index: u32 = 13;
@@ -33,13 +33,15 @@ use crate::{
 /// ```
 #[repr(transparent)]
 pub struct ProgramArray {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for ProgramArray {}
+
 impl ProgramArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PROG_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -47,13 +49,13 @@ impl ProgramArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_PROG_ARRAY,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -61,7 +63,7 @@ impl ProgramArray {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
@@ -78,8 +80,8 @@ impl ProgramArray {
     ///
     /// On success, this function **does not return** into the original program.
     /// On failure, a negative error is returned, wrapped in `Err()`.
-    pub unsafe fn tail_call<C: BpfContext>(&mut self, ctx: &C, index: u32) -> Result<(), c_long> {
-        let res = bpf_tail_call(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, index);
+    pub unsafe fn tail_call<C: BpfContext>(&self, ctx: &C, index: u32) -> Result<(), c_long> {
+        let res = bpf_tail_call(ctx.as_ptr(), self.def.get() as *mut _, index);
         if res != 0 {
             Err(res)
         } else {
diff --git a/bpf/aya-bpf/src/maps/queue.rs b/bpf/aya-bpf/src/maps/queue.rs
index 663db494..fd0e73c1 100644
--- a/bpf/aya-bpf/src/maps/queue.rs
+++ b/bpf/aya-bpf/src/maps/queue.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_QUEUE},
@@ -8,14 +8,16 @@ use crate::{
 
 #[repr(transparent)]
 pub struct Queue<T> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _t: PhantomData<T>,
 }
 
+unsafe impl<T: Sync> Sync for Queue<T> {}
+
 impl<T> Queue<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_QUEUE,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
@@ -23,14 +25,14 @@ impl<T> Queue<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_QUEUE,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
@@ -38,15 +40,15 @@ impl<T> Queue<T> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _t: PhantomData,
         }
     }
 
-    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
+    pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
         let ret = unsafe {
             bpf_map_push_elem(
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 value as *const _ as *const _,
                 flags,
             )
@@ -54,13 +56,10 @@ impl<T> Queue<T> {
         (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub fn pop(&mut self) -> Option<T> {
+    pub fn pop(&self) -> Option<T> {
         unsafe {
             let mut value = mem::MaybeUninit::uninit();
-            let ret = bpf_map_pop_elem(
-                &mut self.def as *mut _ as *mut _,
-                value.as_mut_ptr() as *mut _,
-            );
+            let ret = bpf_map_pop_elem(self.def.get() as *mut _, value.as_mut_ptr() as *mut _);
             (ret >= 0).then(|| value.assume_init())
         }
     }
diff --git a/bpf/aya-bpf/src/maps/sock_hash.rs b/bpf/aya-bpf/src/maps/sock_hash.rs
index cd3d0c04..052fb33b 100644
--- a/bpf/aya-bpf/src/maps/sock_hash.rs
+++ b/bpf/aya-bpf/src/maps/sock_hash.rs
@@ -1,4 +1,4 @@
-use core::{marker::PhantomData, mem};
+use core::{cell::UnsafeCell, marker::PhantomData, mem};
 
 use aya_bpf_cty::c_void;
 
@@ -15,14 +15,16 @@ use crate::{
 
 #[repr(transparent)]
 pub struct SockHash<K> {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
     _k: PhantomData<K>,
 }
 
+unsafe impl<K: Sync> Sync for SockHash<K> {}
+
 impl<K> SockHash<K> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKHASH,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -30,14 +32,14 @@ impl<K> SockHash<K> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
             _k: PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKHASH,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -45,21 +47,16 @@ impl<K> SockHash<K> {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
             _k: PhantomData,
         }
     }
 
-    pub fn update(
-        &mut self,
-        key: &mut K,
-        sk_ops: &mut bpf_sock_ops,
-        flags: u64,
-    ) -> Result<(), i64> {
+    pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
         let ret = unsafe {
             bpf_sock_hash_update(
                 sk_ops as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut c_void,
                 flags,
             )
@@ -67,22 +64,22 @@ impl<K> SockHash<K> {
         (ret >= 0).then(|| ()).ok_or(ret)
     }
 
-    pub fn redirect_msg(&mut self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
+    pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
         unsafe {
             bpf_msg_redirect_hash(
                 ctx.as_ptr() as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut _,
                 flags,
             )
         }
     }
 
-    pub fn redirect_skb(&mut self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
+    pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
         unsafe {
             bpf_sk_redirect_hash(
                 ctx.as_ptr() as *mut _,
-                &mut self.def as *mut _ as *mut _,
+                self.def.get() as *mut _,
                 key as *mut _ as *mut _,
                 flags,
             )
diff --git a/bpf/aya-bpf/src/maps/sock_map.rs b/bpf/aya-bpf/src/maps/sock_map.rs
index 907d288c..4ecafdda 100644
--- a/bpf/aya-bpf/src/maps/sock_map.rs
+++ b/bpf/aya-bpf/src/maps/sock_map.rs
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use aya_bpf_cty::c_void;
 
@@ -15,13 +15,15 @@ use crate::{
 
 #[repr(transparent)]
 pub struct SockMap {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for SockMap {}
+
 impl SockMap {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKMAP,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -29,13 +31,13 @@ impl SockMap {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_SOCKMAP,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
@@ -43,19 +45,19 @@ impl SockMap {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
     pub unsafe fn update(
-        &mut self,
+        &self,
         mut index: u32,
         sk_ops: *mut bpf_sock_ops,
         flags: u64,
     ) -> Result<(), i64> {
         let ret = bpf_sock_map_update(
             sk_ops,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             &mut index as *mut _ as *mut c_void,
             flags,
         );
@@ -66,19 +68,19 @@ impl SockMap {
         }
     }
 
-    pub unsafe fn redirect_msg(&mut self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
+    pub unsafe fn redirect_msg(&self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
         bpf_msg_redirect_map(
             ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             index,
             flags,
         )
    }
 
-    pub unsafe fn redirect_skb(&mut self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
+    pub unsafe fn redirect_skb(&self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
         bpf_sk_redirect_map(
             ctx.as_ptr() as *mut _,
-            &mut self.def as *mut _ as *mut _,
+            self.def.get() as *mut _,
             index,
             flags,
         )
diff --git a/bpf/aya-bpf/src/maps/stack_trace.rs b/bpf/aya-bpf/src/maps/stack_trace.rs
index d87e6667..647e3dd4 100644
--- a/bpf/aya-bpf/src/maps/stack_trace.rs
+++ b/bpf/aya-bpf/src/maps/stack_trace.rs
@@ -1,4 +1,4 @@
-use core::mem;
+use core::{cell::UnsafeCell, mem};
 
 use crate::{
     bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_STACK_TRACE},
@@ -9,15 +9,17 @@ use crate::{
 
 #[repr(transparent)]
 pub struct StackTrace {
-    def: bpf_map_def,
+    def: UnsafeCell<bpf_map_def>,
 }
 
+unsafe impl Sync for StackTrace {}
+
 const PERF_MAX_STACK_DEPTH: u32 = 127;
 
 impl StackTrace {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_STACK_TRACE,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@@ -25,13 +27,13 @@ impl StackTrace {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::None as u32,
-            },
+            }),
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
-            def: bpf_map_def {
+            def: UnsafeCell::new(bpf_map_def {
                 type_: BPF_MAP_TYPE_STACK_TRACE,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
@@ -39,12 +41,12 @@ impl StackTrace {
                 map_flags: flags,
                 id: 0,
                 pinning: PinningType::ByName as u32,
-            },
+            }),
         }
     }
 
-    pub unsafe fn get_stackid<C: BpfContext>(&mut self, ctx: &C, flags: u64) -> Result<c_long, c_long> {
-        let ret = bpf_get_stackid(ctx.as_ptr(), &mut self.def as *mut _ as *mut _, flags);
+    pub unsafe fn get_stackid<C: BpfContext>(&self, ctx: &C, flags: u64) -> Result<c_long, c_long> {
+        let ret = bpf_get_stackid(ctx.as_ptr(), self.def.get() as *mut _, flags);
         if ret < 0 {
             Err(ret)
         } else {