diff --git a/aya-bpf-macros/Cargo.toml b/aya-bpf-macros/Cargo.toml
index a601444d..d3174cdf 100644
--- a/aya-bpf-macros/Cargo.toml
+++ b/aya-bpf-macros/Cargo.toml
@@ -14,3 +14,6 @@ syn = {version = "1.0", features = ["full"]}
 
 [dev-dependencies]
 aya-bpf = { path = "../bpf/aya-bpf" }
+
+[features]
+btf-maps = []
diff --git a/aya-bpf-macros/src/expand.rs b/aya-bpf-macros/src/expand.rs
index b96db0fe..7a9f921c 100644
--- a/aya-bpf-macros/src/expand.rs
+++ b/aya-bpf-macros/src/expand.rs
@@ -93,7 +93,8 @@ impl Map {
     }
 
     pub fn expand(&self) -> Result<TokenStream> {
-        let section_name = "maps".to_string();
+        // TODO(vadorovsky): Handle with feature.
+        let section_name = ".maps".to_string();
         let name = &self.name;
         let item = &self.item;
         Ok(quote! {
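The section rename is the load-bearing change here: BTF-aware, libbpf-style loaders look for BTF map definitions only in a section literally named `.maps`, while legacy `bpf_map_def` maps live in `maps`. As a rough sketch of the intended effect (the generated attributes below are an assumption about the shape of the macro's output, not its verbatim expansion):

```rust
// Hypothetical expansion sketch for:
//     #[map]
//     static COUNTS: HashMap<u32, u64, 1024> = HashMap::new();
// The attribute is assumed to re-emit the static, placed in the ".maps"
// section so that a BTF-aware loader will pick it up.
#[link_section = ".maps"]
#[export_name = "COUNTS"]
static COUNTS: HashMap<u32, u64, 1024> = HashMap::new();
```

A loader that understands BTF maps then recovers key/value layouts and map parameters from the object's BTF for each symbol in `.maps`, instead of reading a fixed `bpf_map_def` struct out of `maps`.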
diff --git a/bpf/aya-bpf-bindings/src/lib.rs b/bpf/aya-bpf-bindings/src/lib.rs
index 1528e4f8..8805854a 100644
--- a/bpf/aya-bpf-bindings/src/lib.rs
+++ b/bpf/aya-bpf-bindings/src/lib.rs
@@ -43,6 +43,78 @@ pub mod bindings {
     pub const TC_ACT_VALUE_MAX: i32 = crate::gen::bindings::TC_ACT_VALUE_MAX as i32;
     pub const TC_ACT_EXT_VAL_MASK: i32 = 268435455;
 
+    // TODO(vadorovsky): Handle that with a macro.
+    pub mod bpf_map_type {
+        pub const BPF_MAP_TYPE_UNSPEC: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_UNSPEC as usize;
+        pub const BPF_MAP_TYPE_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_HASH as usize;
+        pub const BPF_MAP_TYPE_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PROG_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PROG_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PERCPU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH as usize;
+        pub const BPF_MAP_TYPE_PERCPU_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY as usize;
+        pub const BPF_MAP_TYPE_STACK_TRACE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STACK_TRACE as usize;
+        pub const BPF_MAP_TYPE_CGROUP_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_ARRAY as usize;
+        pub const BPF_MAP_TYPE_LRU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LRU_HASH as usize;
+        pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH as usize;
+        pub const BPF_MAP_TYPE_LPM_TRIE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LPM_TRIE as usize;
+        pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_ARRAY_OF_MAPS as usize;
+        pub const BPF_MAP_TYPE_HASH_OF_MAPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_HASH_OF_MAPS as usize;
+        pub const BPF_MAP_TYPE_DEVMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP as usize;
+        pub const BPF_MAP_TYPE_SOCKMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SOCKMAP as usize;
+        pub const BPF_MAP_TYPE_CPUMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CPUMAP as usize;
+        pub const BPF_MAP_TYPE_XSKMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_XSKMAP as usize;
+        pub const BPF_MAP_TYPE_SOCKHASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SOCKHASH as usize;
+        pub const BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED as usize;
+        pub const BPF_MAP_TYPE_CGROUP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE as usize;
+        pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY as usize;
+        pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE as usize;
+        pub const BPF_MAP_TYPE_QUEUE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_QUEUE as usize;
+        pub const BPF_MAP_TYPE_STACK: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STACK as usize;
+        pub const BPF_MAP_TYPE_SK_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SK_STORAGE as usize;
+        pub const BPF_MAP_TYPE_DEVMAP_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH as usize;
+        pub const BPF_MAP_TYPE_STRUCT_OPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STRUCT_OPS as usize;
+        pub const BPF_MAP_TYPE_RINGBUF: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_RINGBUF as usize;
+        pub const BPF_MAP_TYPE_INODE_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_INODE_STORAGE as usize;
+        pub const BPF_MAP_TYPE_TASK_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_TASK_STORAGE as usize;
+        pub const BPF_MAP_TYPE_BLOOM_FILTER: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER as usize;
+        pub const BPF_MAP_TYPE_USER_RINGBUF: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_USER_RINGBUF as usize;
+        pub const BPF_MAP_TYPE_CGRP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGRP_STORAGE as usize;
+    }
+
     #[repr(C)]
     #[derive(Debug, Copy, Clone)]
     pub struct bpf_map_def {
diff --git a/bpf/aya-bpf/Cargo.toml b/bpf/aya-bpf/Cargo.toml
index 3e6b390d..62ea3476 100644
--- a/bpf/aya-bpf/Cargo.toml
+++ b/bpf/aya-bpf/Cargo.toml
@@ -11,3 +11,7 @@ aya-bpf-bindings = { path = "../aya-bpf-bindings" }
 
 [build-dependencies]
 rustversion = "1.0"
+
+[features]
+default = []
+btf-maps = ["aya-bpf-macros/btf-maps"]
diff --git a/bpf/aya-bpf/src/maps/array.rs b/bpf/aya-bpf/src/maps/array.rs
index 2150dd6e..2135442a 100644
--- a/bpf/aya-bpf/src/maps/array.rs
+++ b/bpf/aya-bpf/src/maps/array.rs
@@ -8,6 +8,11 @@ use crate::{
     maps::PinningType,
 };
 
+// #[cfg(feature = "btf-maps")]
+// #[repr(transparent)]
+// pub struct Array<T> {
+//     def: UnsafeCell<MapDef>,
+// }
 #[repr(transparent)]
 pub struct Array<T> {
     def: UnsafeCell<bpf_map_def>,
@@ -20,7 +25,7 @@ impl<T> Array<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
         Array {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_ARRAY,
+                type_: BPF_MAP_TYPE_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -35,7 +40,7 @@ impl<T> Array<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
         Array {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_ARRAY,
+                type_: BPF_MAP_TYPE_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
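The `usize` re-exports above and the `as u32` casts they force are two sides of the same change: in the BTF map definition introduced later in this diff (`MapDef` in `maps/mod.rs`), each integer parameter is encoded as the length of an array type, and array lengths in Rust are `usize`. The legacy `bpf_map_def.type_` field remains a `u32`, hence the casts. A minimal standalone sketch (the constant is declared locally for illustration; 1 is `BPF_MAP_TYPE_HASH`'s value in the kernel headers):

```rust
// Stand-in for the new usize re-export.
const BPF_MAP_TYPE_HASH: usize = 1;

// As a const generic, the constant can parameterize an array *type*; the
// loader later reads the length back out of the BTF for that type. Array
// lengths must be `usize`, which is why the re-exports changed type.
type HashTypeTag = *const [i32; BPF_MAP_TYPE_HASH];

// The legacy bpf_map_def still stores the map type as a u32, hence the
// `as u32` casts added throughout this diff.
fn legacy_type_field() -> u32 {
    BPF_MAP_TYPE_HASH as u32
}
```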
diff --git a/bpf/aya-bpf/src/maps/bloom_filter.rs b/bpf/aya-bpf/src/maps/bloom_filter.rs
index d15d264f..30771be7 100644
--- a/bpf/aya-bpf/src/maps/bloom_filter.rs
+++ b/bpf/aya-bpf/src/maps/bloom_filter.rs
@@ -18,7 +18,7 @@ impl<T> BloomFilter<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> BloomFilter<T> {
         BloomFilter {
             def: build_def::<T>(
-                BPF_MAP_TYPE_BLOOM_FILTER,
+                BPF_MAP_TYPE_BLOOM_FILTER as u32,
                 max_entries,
                 flags,
                 PinningType::None,
@@ -30,7 +30,7 @@ impl<T> BloomFilter<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> BloomFilter<T> {
         BloomFilter {
             def: build_def::<T>(
-                BPF_MAP_TYPE_BLOOM_FILTER,
+                BPF_MAP_TYPE_BLOOM_FILTER as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
diff --git a/bpf/aya-bpf/src/maps/hash_map.rs b/bpf/aya-bpf/src/maps/hash_map.rs
index 5877be90..c5871abe 100644
--- a/bpf/aya-bpf/src/maps/hash_map.rs
+++ b/bpf/aya-bpf/src/maps/hash_map.rs
@@ -1,49 +1,107 @@
-use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, ptr::NonNull};
 
 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
 };
 use aya_bpf_cty::{c_long, c_void};
 
+#[cfg(not(feature = "btf-maps"))]
+use crate::{bindings::bpf_map_def, maps::PinningType};
 use crate::{
-    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_HASH},
+    bindings::bpf_map_type::BPF_MAP_TYPE_HASH,
     helpers::{bpf_map_delete_elem, bpf_map_lookup_elem, bpf_map_update_elem},
-    maps::PinningType,
 };
 
+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct HashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_HASH, MAX_ENTRIES, FLAGS>>,
+}
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct HashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }
 
+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for HashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
 
+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> HashMap<K, V, MAX_ENTRIES, FLAGS> {
+    pub const fn new() -> Self {
+        Self {
+            def: UnsafeCell::new(super::MapDef::new()),
+        }
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// make guarantees about the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> HashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
+                BPF_MAP_TYPE_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
+                BPF_MAP_TYPE_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
@@ -54,7 +112,7 @@ impl<K, V> HashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -62,7 +120,7 @@ impl<K, V> HashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -71,53 +129,105 @@ impl<K, V> HashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }
 
     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }
 
     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }
 
+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct LruHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_LRU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct LruHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }
 
+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for LruHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
 
+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> LruHashMap<K, V, MAX_ENTRIES, FLAGS> {
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// make guarantees about the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> LruHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
+                BPF_MAP_TYPE_LRU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
+                BPF_MAP_TYPE_LRU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
@@ -128,7 +238,7 @@ impl<K, V> LruHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -136,7 +246,7 @@ impl<K, V> LruHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -145,53 +255,105 @@ impl<K, V> LruHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }
 
     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }
 
     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }
 
+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct PerCpuHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_PERCPU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct PerCpuHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }
 
+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for PerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for PerCpuHashMap<K, V> {}
 
+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> PerCpuHashMap<K, V, MAX_ENTRIES, FLAGS> {
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// make guarantees about the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> PerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
+                BPF_MAP_TYPE_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
+                BPF_MAP_TYPE_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
@@ -202,7 +364,7 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -210,7 +372,7 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -219,53 +381,107 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }
 
     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }
 
     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }
 
+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct LruPerCpuHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_LRU_PERCPU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct LruPerCpuHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }
 
+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for LruPerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for LruPerCpuHashMap<K, V> {}
 
+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize>
+    LruPerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+    /// Retrieve the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// make guarantees about the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieve the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
+                BPF_MAP_TYPE_LRU_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
     pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
+                BPF_MAP_TYPE_LRU_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
 
@@ -276,7 +492,7 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -284,7 +500,7 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }
 
     /// Retrieve the value associated with `key` from the map.
@@ -293,25 +509,26 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }
 
     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }
 
     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }
 
+#[cfg(not(feature = "btf-maps"))]
 const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
     bpf_map_def {
         type_: ty,
-        key_size: mem::size_of::<K>() as u32,
-        value_size: mem::size_of::<V>() as u32,
+        key_size: core::mem::size_of::<K>() as u32,
+        value_size: core::mem::size_of::<V>() as u32,
         max_entries,
         map_flags: flags,
         id: 0,
@@ -320,7 +537,7 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningTyp
 }
 
 #[inline]
-fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
+fn get_ptr_mut<K, V>(def: *mut c_void, key: &K) -> Option<*mut V> {
     unsafe {
         let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
@@ -329,20 +546,20 @@ fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
 }
 
 #[inline]
-fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
+fn get_ptr<K, V>(def: *mut c_void, key: &K) -> Option<*const V> {
     get_ptr_mut(def, key).map(|p| p as *const V)
 }
 
 #[inline]
-unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
+unsafe fn get<'a, K, V>(def: *mut c_void, key: &K) -> Option<&'a V> {
     get_ptr(def, key).map(|p| &*p)
 }
 
 #[inline]
-fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+fn insert<K, V>(def: *mut c_void, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
     let ret = unsafe {
         bpf_map_update_elem(
-            def as *mut _,
+            def,
             key as *const _ as *const _,
             value as *const _ as *const _,
             flags,
@@ -352,7 +569,7 @@ fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Resul
 }
 
 #[inline]
-fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
+fn remove<K>(def: *mut c_void, key: &K) -> Result<(), c_long> {
     let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
     (ret == 0).then_some(()).ok_or(ret)
 }
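Taken together, the `hash_map.rs` changes keep the accessor API identical in both modes; only the declaration changes, and the free helpers now take a type-erased `*mut c_void` so one set of helpers serves both the legacy `bpf_map_def` and the new `MapDef` layouts. A hedged usage sketch (hypothetical program code; the map name, the `1024` capacity, and the assumption that the program crate re-exposes a `btf-maps` feature are all invented for illustration):

```rust
use aya_bpf::{macros::map, maps::HashMap};

// With btf-maps: capacity is a const generic, definition is all BTF.
#[cfg(feature = "btf-maps")]
#[map]
static COUNTS: HashMap<u32, u64, 1024> = HashMap::new();

// Without it: the legacy bpf_map_def constructor, same capacity.
#[cfg(not(feature = "btf-maps"))]
#[map]
static COUNTS: HashMap<u32, u64> = HashMap::with_max_entries(1024, 0);

fn record(pid: u32) {
    // Identical accessor API in both modes.
    let next = match unsafe { COUNTS.get(&pid) } {
        Some(count) => count + 1,
        None => 1,
    };
    // flags = 0 is BPF_ANY: create or update. Errors are ignored here.
    let _ = COUNTS.insert(&pid, &next, 0);
}
```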
diff --git a/bpf/aya-bpf/src/maps/lpm_trie.rs b/bpf/aya-bpf/src/maps/lpm_trie.rs
index d9314899..790adb8e 100644
--- a/bpf/aya-bpf/src/maps/lpm_trie.rs
+++ b/bpf/aya-bpf/src/maps/lpm_trie.rs
@@ -37,7 +37,7 @@ impl<K, V> LpmTrie<K, V> {
         let flags = flags | BPF_F_NO_PREALLOC;
         LpmTrie {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LPM_TRIE,
+                BPF_MAP_TYPE_LPM_TRIE as u32,
                 max_entries,
                 flags,
                 PinningType::None,
@@ -51,7 +51,7 @@ impl<K, V> LpmTrie<K, V> {
         let flags = flags | BPF_F_NO_PREALLOC;
         LpmTrie {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LPM_TRIE,
+                BPF_MAP_TYPE_LPM_TRIE as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
diff --git a/bpf/aya-bpf/src/maps/mod.rs b/bpf/aya-bpf/src/maps/mod.rs
index 8fa375dd..6681d50c 100644
--- a/bpf/aya-bpf/src/maps/mod.rs
+++ b/bpf/aya-bpf/src/maps/mod.rs
@@ -30,3 +30,38 @@ pub use sock_hash::SockHash;
 pub use sock_map::SockMap;
 pub use stack::Stack;
 pub use stack_trace::StackTrace;
+
+#[cfg(feature = "btf-maps")]
+mod btf_maps {
+    #[repr(C)]
+    pub(crate) struct MapDef<
+        K,
+        V,
+        const MAP_TYPE: usize,
+        const MAX_ENTRIES: usize,
+        const FLAGS: usize = 0,
+    > {
+        r#type: *const [i32; MAP_TYPE],
+        key: *const K,
+        value: *const V,
+        max_entries: *const [i32; MAX_ENTRIES],
+        map_flags: *const [i32; FLAGS],
+    }
+
+    impl<K, V, const MAP_TYPE: usize, const MAX_ENTRIES: usize, const FLAGS: usize>
+        MapDef<K, V, MAP_TYPE, MAX_ENTRIES, FLAGS>
+    {
+        pub const fn new() -> Self {
+            Self {
+                r#type: ::core::ptr::null(),
+                key: ::core::ptr::null(),
+                value: ::core::ptr::null(),
+                max_entries: ::core::ptr::null(),
+                map_flags: ::core::ptr::null(),
+            }
+        }
+    }
+}
+
+#[cfg(feature = "btf-maps")]
+pub(crate) use btf_maps::*;
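`MapDef` is the Rust rendering of libbpf's BTF map convention: every map parameter is carried in the *type* of a struct member, never in its value, so it survives into the object's BTF. Scalar parameters become pointers to `[i32; N]` arrays (the loader reads `N` back out of the pointee's BTF array type), and key/value layouts ride along as pointee types. That is also why `new()` can set every field to a null pointer: loaders consume only the type information, never the data. A standalone illustration with assumed values (`BPF_MAP_TYPE_HASH` is 1 in the kernel headers), mirroring the definition above:

```rust
// C convention this mirrors (libbpf's bpf_helpers.h):
//     struct {
//         __uint(type, BPF_MAP_TYPE_HASH);   // => int (*type)[1]
//         __type(key, __u32);                // => __u32 *key
//         __type(value, __u64);              // => __u64 *value
//         __uint(max_entries, 1024);         // => int (*max_entries)[1024]
//     } counts SEC(".maps");
const BPF_MAP_TYPE_HASH: usize = 1;

#[allow(dead_code)]
#[repr(C)]
struct MapDef<K, V, const MAP_TYPE: usize, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
    r#type: *const [i32; MAP_TYPE],
    key: *const K,
    value: *const V,
    max_entries: *const [i32; MAX_ENTRIES],
    map_flags: *const [i32; FLAGS],
}

// A u32 -> u64 hash map with 1024 entries, as BTF sees it: all parameters
// live in the field types, so the concrete type alias says everything.
type CountsDef = MapDef<u32, u64, BPF_MAP_TYPE_HASH, 1024>;
```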
diff --git a/bpf/aya-bpf/src/maps/per_cpu_array.rs b/bpf/aya-bpf/src/maps/per_cpu_array.rs
index 9a5e388f..9c25b087 100644
--- a/bpf/aya-bpf/src/maps/per_cpu_array.rs
+++ b/bpf/aya-bpf/src/maps/per_cpu_array.rs
@@ -20,7 +20,7 @@ impl<T> PerCpuArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERCPU_ARRAY,
+                type_: BPF_MAP_TYPE_PERCPU_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -35,7 +35,7 @@ impl<T> PerCpuArray<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERCPU_ARRAY,
+                type_: BPF_MAP_TYPE_PERCPU_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/perf/perf_event_array.rs b/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
index c881a885..8b0236b7 100644
--- a/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
+++ b/bpf/aya-bpf/src/maps/perf/perf_event_array.rs
@@ -23,7 +23,7 @@ impl<T> PerfEventArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -38,7 +38,7 @@ impl<T> PerfEventArray<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs b/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
index 46c3613f..be3ad273 100644
--- a/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
+++ b/bpf/aya-bpf/src/maps/perf/perf_event_byte_array.rs
@@ -22,7 +22,7 @@ impl PerfEventByteArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -36,7 +36,7 @@ impl PerfEventByteArray {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/program_array.rs b/bpf/aya-bpf/src/maps/program_array.rs
index b7e54a6c..0411226d 100644
--- a/bpf/aya-bpf/src/maps/program_array.rs
+++ b/bpf/aya-bpf/src/maps/program_array.rs
@@ -42,7 +42,7 @@ impl ProgramArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PROG_ARRAY,
+                type_: BPF_MAP_TYPE_PROG_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -56,7 +56,7 @@ impl ProgramArray {
     pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PROG_ARRAY,
+                type_: BPF_MAP_TYPE_PROG_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/queue.rs b/bpf/aya-bpf/src/maps/queue.rs
index 8c8f0bb1..037150cf 100644
--- a/bpf/aya-bpf/src/maps/queue.rs
+++ b/bpf/aya-bpf/src/maps/queue.rs
@@ -18,7 +18,7 @@ impl<T> Queue<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_QUEUE,
+                type_: BPF_MAP_TYPE_QUEUE as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -33,7 +33,7 @@ impl<T> Queue<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_QUEUE,
+                type_: BPF_MAP_TYPE_QUEUE as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/sock_hash.rs b/bpf/aya-bpf/src/maps/sock_hash.rs
index 39eedcce..e777f39e 100644
--- a/bpf/aya-bpf/src/maps/sock_hash.rs
+++ b/bpf/aya-bpf/src/maps/sock_hash.rs
@@ -25,7 +25,7 @@ impl<K> SockHash<K> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKHASH,
+                type_: BPF_MAP_TYPE_SOCKHASH as u32,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -40,7 +40,7 @@ impl<K> SockHash<K> {
     pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKHASH,
+                type_: BPF_MAP_TYPE_SOCKHASH as u32,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/sock_map.rs b/bpf/aya-bpf/src/maps/sock_map.rs
index 80c6fe7e..ba142e6c 100644
--- a/bpf/aya-bpf/src/maps/sock_map.rs
+++ b/bpf/aya-bpf/src/maps/sock_map.rs
@@ -24,7 +24,7 @@ impl SockMap {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKMAP,
+                type_: BPF_MAP_TYPE_SOCKMAP as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -38,7 +38,7 @@ impl SockMap {
     pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKMAP,
+                type_: BPF_MAP_TYPE_SOCKMAP as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/stack.rs b/bpf/aya-bpf/src/maps/stack.rs
index 6328693d..bd1ecf66 100644
--- a/bpf/aya-bpf/src/maps/stack.rs
+++ b/bpf/aya-bpf/src/maps/stack.rs
@@ -16,7 +16,7 @@ impl<T> Stack<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Stack<T> {
         Stack {
             def: bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK,
+                type_: BPF_MAP_TYPE_STACK as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -31,7 +31,7 @@ impl<T> Stack<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Stack<T> {
         Stack {
             def: bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK,
+                type_: BPF_MAP_TYPE_STACK as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
diff --git a/bpf/aya-bpf/src/maps/stack_trace.rs b/bpf/aya-bpf/src/maps/stack_trace.rs
index 647e3dd4..b65cf396 100644
--- a/bpf/aya-bpf/src/maps/stack_trace.rs
+++ b/bpf/aya-bpf/src/maps/stack_trace.rs
@@ -20,7 +20,7 @@ impl StackTrace {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK_TRACE,
+                type_: BPF_MAP_TYPE_STACK_TRACE as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
                 max_entries,
@@ -34,7 +34,7 @@ impl StackTrace {
     pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK_TRACE,
+                type_: BPF_MAP_TYPE_STACK_TRACE as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
                 max_entries,
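Downstream, a BPF program crate opts in through the feature chain introduced above: `aya-bpf/btf-maps` forwards to `aya-bpf-macros/btf-maps`, so the map structs and the `#[map]` section handling switch together. A hypothetical dependent Cargo.toml (the path is a placeholder for wherever the aya workspace lives):

```toml
# Hypothetical BPF program crate enabling the feature added by this diff.
[dependencies]
aya-bpf = { path = "../../aya/bpf/aya-bpf", features = ["btf-maps"] }
```

Note that per the TODO in `expand.rs`, the macro currently emits `.maps` unconditionally; gating the section name on the feature is left as follow-up work.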