aya-bpf: Add BTF maps (as a feature)

The new `btf-maps` feature enables the use of BTF-defined BPF map
definitions[0] instead of the legacy ones.

This is still a work in progress: it requires debug-info modifications
in bpf-linker, which are being worked on in aya-rs/bpf-linker#35. For
now, this draft PR serves as a reference for that bpf-linker work.

[0] https://lwn.net/Articles/790177/
Branch: pull/596/head
Michal Rostecki, 1 year ago
parent 0a1f1a2898 · commit 1d1a8f9fc9
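
For a sense of what the feature enables, here is a minimal sketch of a
map declaration with `btf-maps` turned on, using the const-generic API
introduced in this diff (the map name and sizes are illustrative, and
the final expansion still depends on the bpf-linker work mentioned
above):

    use aya_bpf::{macros::map, maps::HashMap};

    // With `btf-maps`, capacity and flags move from runtime `bpf_map_def`
    // fields into const generics, so they can be described by the object
    // file's BTF.
    #[map]
    static PACKET_COUNTS: HashMap<u32, u64, 1024> = HashMap::new();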

@@ -14,3 +14,6 @@ syn = {version = "1.0", features = ["full"]}
 [dev-dependencies]
 aya-bpf = { path = "../bpf/aya-bpf" }
+
+[features]
+btf-maps = []

@@ -93,7 +93,8 @@ impl Map {
     }

     pub fn expand(&self) -> Result<TokenStream> {
-        let section_name = "maps".to_string();
+        // TODO(vadorovsky): Handle with feature.
+        let section_name = ".maps".to_string();
        let name = &self.name;
        let item = &self.item;
        Ok(quote! {
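
The section name matters here: libbpf-style loaders parse BTF-defined
maps out of the `.maps` ELF section, while legacy `bpf_map_def` maps
live in `maps`. Roughly, the attribute macro's output under the new
scheme would look like the following (the attribute names mirror the
existing expansion in this crate; the static itself is illustrative):

    #[link_section = ".maps"]
    #[export_name = "PACKET_COUNTS"]
    static PACKET_COUNTS: HashMap<u32, u64, 1024> = HashMap::new();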

@@ -43,6 +43,78 @@ pub mod bindings {
     pub const TC_ACT_VALUE_MAX: i32 = crate::gen::bindings::TC_ACT_VALUE_MAX as i32;
     pub const TC_ACT_EXT_VAL_MASK: i32 = 268435455;

+    // TODO(vadorovsky): Handle that with a macro.
+    pub mod bpf_map_type {
+        pub const BPF_MAP_TYPE_UNSPEC: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_UNSPEC as usize;
+        pub const BPF_MAP_TYPE_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_HASH as usize;
+        pub const BPF_MAP_TYPE_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PROG_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PROG_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PERF_EVENT_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY as usize;
+        pub const BPF_MAP_TYPE_PERCPU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH as usize;
+        pub const BPF_MAP_TYPE_PERCPU_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY as usize;
+        pub const BPF_MAP_TYPE_STACK_TRACE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STACK_TRACE as usize;
+        pub const BPF_MAP_TYPE_CGROUP_ARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_ARRAY as usize;
+        pub const BPF_MAP_TYPE_LRU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LRU_HASH as usize;
+        pub const BPF_MAP_TYPE_LRU_PERCPU_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH as usize;
+        pub const BPF_MAP_TYPE_LPM_TRIE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_LPM_TRIE as usize;
+        pub const BPF_MAP_TYPE_ARRAY_OF_MAPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_ARRAY_OF_MAPS as usize;
+        pub const BPF_MAP_TYPE_HASH_OF_MAPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_HASH_OF_MAPS as usize;
+        pub const BPF_MAP_TYPE_DEVMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP as usize;
+        pub const BPF_MAP_TYPE_SOCKMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SOCKMAP as usize;
+        pub const BPF_MAP_TYPE_CPUMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CPUMAP as usize;
+        pub const BPF_MAP_TYPE_XSKMAP: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_XSKMAP as usize;
+        pub const BPF_MAP_TYPE_SOCKHASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SOCKHASH as usize;
+        pub const BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED as usize;
+        pub const BPF_MAP_TYPE_CGROUP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE as usize;
+        pub const BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_REUSEPORT_SOCKARRAY as usize;
+        pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE as usize;
+        pub const BPF_MAP_TYPE_QUEUE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_QUEUE as usize;
+        pub const BPF_MAP_TYPE_STACK: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STACK as usize;
+        pub const BPF_MAP_TYPE_SK_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_SK_STORAGE as usize;
+        pub const BPF_MAP_TYPE_DEVMAP_HASH: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH as usize;
+        pub const BPF_MAP_TYPE_STRUCT_OPS: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_STRUCT_OPS as usize;
+        pub const BPF_MAP_TYPE_RINGBUF: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_RINGBUF as usize;
+        pub const BPF_MAP_TYPE_INODE_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_INODE_STORAGE as usize;
+        pub const BPF_MAP_TYPE_TASK_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_TASK_STORAGE as usize;
+        pub const BPF_MAP_TYPE_BLOOM_FILTER: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER as usize;
+        pub const BPF_MAP_TYPE_USER_RINGBUF: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_USER_RINGBUF as usize;
+        pub const BPF_MAP_TYPE_CGRP_STORAGE: usize =
+            crate::gen::bindings::bpf_map_type::BPF_MAP_TYPE_CGRP_STORAGE as usize;
+    }
+
     #[repr(C)]
     #[derive(Debug, Copy, Clone)]
     pub struct bpf_map_def {

@@ -11,3 +11,7 @@ aya-bpf-bindings = { path = "../aya-bpf-bindings" }
 [build-dependencies]
 rustversion = "1.0"
+
+[features]
+default = []
+btf-maps = ["aya-bpf-macros/btf-maps"]

@@ -8,6 +8,11 @@ use crate::{
     maps::PinningType,
 };

+// #[cfg(feature = "btf-maps")]
+// #[repr(transparent)]
+// pub struct Array<T, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+//     def: UnsafeCell<super::MapDef<u32, T, BPF_MAP_TYPE_ARRAY, MAX_ENTRIES, FLAGS>>,
+// }
 #[repr(transparent)]
 pub struct Array<T> {
     def: UnsafeCell<bpf_map_def>,
@@ -20,7 +25,7 @@ impl<T> Array<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
         Array {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_ARRAY,
+                type_: BPF_MAP_TYPE_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -35,7 +40,7 @@ impl<T> Array<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
         Array {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_ARRAY,
+                type_: BPF_MAP_TYPE_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,

@@ -18,7 +18,7 @@ impl<T> BloomFilter<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> BloomFilter<T> {
         BloomFilter {
             def: build_def::<T>(
-                BPF_MAP_TYPE_BLOOM_FILTER,
+                BPF_MAP_TYPE_BLOOM_FILTER as u32,
                 max_entries,
                 flags,
                 PinningType::None,
@@ -30,7 +30,7 @@ impl<T> BloomFilter<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> BloomFilter<T> {
         BloomFilter {
             def: build_def::<T>(
-                BPF_MAP_TYPE_BLOOM_FILTER,
+                BPF_MAP_TYPE_BLOOM_FILTER as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,

@@ -1,49 +1,107 @@
-use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr::NonNull};
+use core::{cell::UnsafeCell, ptr::NonNull};

 use aya_bpf_bindings::bindings::bpf_map_type::{
     BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_PERCPU_HASH,
 };
 use aya_bpf_cty::{c_long, c_void};

+#[cfg(not(feature = "btf-maps"))]
+use crate::{bindings::bpf_map_def, maps::PinningType};
 use crate::{
-    bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_HASH},
+    bindings::bpf_map_type::BPF_MAP_TYPE_HASH,
     helpers::{bpf_map_delete_elem, bpf_map_lookup_elem, bpf_map_update_elem},
-    maps::PinningType,
 };

+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct HashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize = 0> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_HASH, MAX_ENTRIES, FLAGS>>,
+}
+
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct HashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }

+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for HashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}

+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> HashMap<K, V, MAX_ENTRIES, FLAGS> {
+    pub const fn new() -> Self {
+        Self {
+            def: UnsafeCell::new(super::MapDef::new()),
+        }
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> HashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
+                BPF_MAP_TYPE_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }

     pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
         HashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
+                BPF_MAP_TYPE_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
@@ -54,7 +112,7 @@ impl<K, V> HashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -62,7 +120,7 @@ impl<K, V> HashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -71,53 +129,105 @@ impl<K, V> HashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }

     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }

     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }

+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct LruHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_LRU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct LruHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }

+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for LruHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}

+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> LruHashMap<K, V, MAX_ENTRIES, FLAGS> {
+    /// Retrieves the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> LruHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
+                BPF_MAP_TYPE_LRU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }

     pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
         LruHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
+                BPF_MAP_TYPE_LRU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
@@ -128,7 +238,7 @@ impl<K, V> LruHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -136,7 +246,7 @@ impl<K, V> LruHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -145,53 +255,105 @@ impl<K, V> LruHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }

     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }

     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }

+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct PerCpuHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_PERCPU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct PerCpuHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }

+#[cfg(feature = "btf-maps")]
+unsafe impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for PerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}

+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> PerCpuHashMap<K, V, MAX_ENTRIES, FLAGS> {
+    /// Retrieves the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> PerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
+                BPF_MAP_TYPE_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }

     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
         PerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
+                BPF_MAP_TYPE_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
@@ -202,7 +364,7 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -210,7 +372,7 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -219,53 +381,105 @@ impl<K, V> PerCpuHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }

     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }

     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }

+#[cfg(feature = "btf-maps")]
+#[repr(transparent)]
+pub struct LruPerCpuHashMap<K, V, const MAX_ENTRIES: usize, const FLAGS: usize> {
+    def: UnsafeCell<super::MapDef<K, V, BPF_MAP_TYPE_LRU_PERCPU_HASH, MAX_ENTRIES, FLAGS>>,
+}
+
+#[cfg(not(feature = "btf-maps"))]
 #[repr(transparent)]
 pub struct LruPerCpuHashMap<K, V> {
     def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
+    _k: core::marker::PhantomData<K>,
+    _v: core::marker::PhantomData<V>,
 }

+#[cfg(feature = "btf-maps")]
+unsafe impl<K: Sync, V: Sync, const MAX_ENTRIES: usize, const FLAGS: usize> Sync
+    for LruPerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+}
+
+#[cfg(not(feature = "btf-maps"))]
 unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}

+#[cfg(feature = "btf-maps")]
+impl<K, V, const MAX_ENTRIES: usize, const FLAGS: usize>
+    LruPerCpuHashMap<K, V, MAX_ENTRIES, FLAGS>
+{
+    /// Retrieves the value associated with `key` from the map.
+    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not
+    /// guarantee the atomicity of `insert` or `remove`, and any element removed from the
+    /// map might get aliased by another element in the map, causing garbage to be read, or
+    /// corruption in case of writes.
+    #[inline]
+    pub unsafe fn get(&self, key: &K) -> Option<&V> {
+        get(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
+    /// to decide whether it's safe to dereference the pointer or not.
+    #[inline]
+    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
+        get_ptr(self.def.get() as *mut _, key)
+    }
+
+    /// Retrieves the value associated with `key` from the map.
+    /// The same caveat as `get` applies, and additionally care should be taken to avoid
+    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
+    /// pointer or not.
+    #[inline]
+    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
+        get_ptr_mut(self.def.get() as *mut _, key)
+    }
+
+    #[inline]
+    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+        insert(self.def.get() as *mut _, key, value, flags)
+    }
+
+    #[inline]
+    pub fn remove(&self, key: &K) -> Result<(), c_long> {
+        remove(self.def.get() as *mut _, key)
+    }
+}
+
+#[cfg(not(feature = "btf-maps"))]
 impl<K, V> LruPerCpuHashMap<K, V> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
+                BPF_MAP_TYPE_LRU_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::None,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }

     pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
         LruPerCpuHashMap {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
+                BPF_MAP_TYPE_LRU_PERCPU_HASH as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,
             )),
-            _k: PhantomData,
-            _v: PhantomData,
+            _k: core::marker::PhantomData,
+            _v: core::marker::PhantomData,
         }
     }
@@ -276,7 +492,7 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// corruption in case of writes.
     #[inline]
     pub unsafe fn get(&self, key: &K) -> Option<&V> {
-        get(self.def.get(), key)
+        get(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -284,7 +500,7 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// to decide whether it's safe to dereference the pointer or not.
     #[inline]
     pub fn get_ptr(&self, key: &K) -> Option<*const V> {
-        get_ptr(self.def.get(), key)
+        get_ptr(self.def.get() as *mut _, key)
     }

     /// Retrieve the value associate with `key` from the map.
@@ -293,25 +509,26 @@ impl<K, V> LruPerCpuHashMap<K, V> {
     /// pointer or not.
     #[inline]
     pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key)
+        get_ptr_mut(self.def.get() as *mut _, key)
     }

     #[inline]
     pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
-        insert(self.def.get(), key, value, flags)
+        insert(self.def.get() as *mut _, key, value, flags)
     }

     #[inline]
     pub fn remove(&self, key: &K) -> Result<(), c_long> {
-        remove(self.def.get(), key)
+        remove(self.def.get() as *mut _, key)
     }
 }

+#[cfg(not(feature = "btf-maps"))]
 const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
     bpf_map_def {
         type_: ty,
-        key_size: mem::size_of::<K>() as u32,
-        value_size: mem::size_of::<V>() as u32,
+        key_size: core::mem::size_of::<K>() as u32,
+        value_size: core::mem::size_of::<V>() as u32,
         max_entries,
         map_flags: flags,
         id: 0,
@@ -320,7 +537,7 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
 }

 #[inline]
-fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
+fn get_ptr_mut<K, V>(def: *mut c_void, key: &K) -> Option<*mut V> {
     unsafe {
         let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
         // FIXME: alignment
@@ -329,20 +546,20 @@ fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
 }

 #[inline]
-fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
+fn get_ptr<K, V>(def: *mut c_void, key: &K) -> Option<*const V> {
     get_ptr_mut(def, key).map(|p| p as *const V)
 }

 #[inline]
-unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
+unsafe fn get<'a, K, V>(def: *mut c_void, key: &K) -> Option<&'a V> {
     get_ptr(def, key).map(|p| &*p)
 }

 #[inline]
-fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
+fn insert<K, V>(def: *mut c_void, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
     let ret = unsafe {
         bpf_map_update_elem(
-            def as *mut _,
+            def,
             key as *const _ as *const _,
             value as *const _ as *const _,
             flags,
@@ -352,7 +569,7 @@ fn insert<K, V>(def: *mut bpf_map_def, key: &K, value: &V, flags: u64) -> Result
 }

 #[inline]
-fn remove<K>(def: *mut bpf_map_def, key: &K) -> Result<(), c_long> {
+fn remove<K>(def: *mut c_void, key: &K) -> Result<(), c_long> {
     let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
     (ret == 0).then_some(()).ok_or(ret)
 }
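
Note that the `btf-maps` impl blocks deliberately mirror the legacy
method surface (`get`, `get_ptr`, `get_ptr_mut`, `insert`, `remove`), so
program code should be unchanged apart from the type parameters. A
hypothetical caller, assuming the feature is enabled:

    use aya_bpf::maps::HashMap;

    // `FLAGS` defaults to 0, so only key, value, and capacity are spelled out.
    static CONN_COUNT: HashMap<u32, u64, 10240> = HashMap::new();

    fn record(pid: u32) {
        // Identical call shape to the legacy API; only the map's type changed.
        let _ = CONN_COUNT.insert(&pid, &1, 0);
    }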

@@ -37,7 +37,7 @@ impl<K, V> LpmTrie<K, V> {
         let flags = flags | BPF_F_NO_PREALLOC;
         LpmTrie {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LPM_TRIE,
+                BPF_MAP_TYPE_LPM_TRIE as u32,
                 max_entries,
                 flags,
                 PinningType::None,
@@ -51,7 +51,7 @@ impl<K, V> LpmTrie<K, V> {
         let flags = flags | BPF_F_NO_PREALLOC;
         LpmTrie {
             def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LPM_TRIE,
+                BPF_MAP_TYPE_LPM_TRIE as u32,
                 max_entries,
                 flags,
                 PinningType::ByName,

@@ -30,3 +30,38 @@ pub use sock_hash::SockHash;
 pub use sock_map::SockMap;
 pub use stack::Stack;
 pub use stack_trace::StackTrace;
+
+#[cfg(feature = "btf-maps")]
+mod btf_maps {
+    #[repr(C)]
+    pub(crate) struct MapDef<
+        K,
+        V,
+        const MAP_TYPE: usize,
+        const MAX_ENTRIES: usize,
+        const FLAGS: usize = 0,
+    > {
+        r#type: *const [i32; MAP_TYPE],
+        key: *const K,
+        value: *const V,
+        max_entries: *const [i32; MAX_ENTRIES],
+        map_flags: *const [i32; FLAGS],
+    }
+
+    impl<K, V, const MAP_TYPE: usize, const MAX_ENTRIES: usize, const FLAGS: usize>
+        MapDef<K, V, MAP_TYPE, MAX_ENTRIES, FLAGS>
+    {
+        pub const fn new() -> Self {
+            Self {
+                r#type: ::core::ptr::null(),
+                key: ::core::ptr::null(),
+                value: ::core::ptr::null(),
+                max_entries: ::core::ptr::null(),
+                map_flags: ::core::ptr::null(),
+            }
+        }
+    }
+}
+
+#[cfg(feature = "btf-maps")]
+pub(crate) use btf_maps::*;
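
The field types are doing the real work here. Following the libbpf
convention for BTF map definitions, each integer parameter is encoded as
a pointer to an array whose length carries the value, and the key/value
types are encoded as pointers to those types, so everything the loader
needs lands in the object's BTF rather than in a data section. To make
the encoding concrete, a hypothetical instantiation (assuming
BPF_MAP_TYPE_HASH == 1):

    // MapDef<u32, u64, BPF_MAP_TYPE_HASH, 1024, 0> monomorphizes to a
    // struct whose BTF describes:
    //
    //     r#type:      *const [i32; 1]    // map type 1 = hash
    //     key:         *const u32         // 4-byte keys
    //     value:       *const u64         // 8-byte values
    //     max_entries: *const [i32; 1024] // capacity
    //     map_flags:   *const [i32; 0]    // no flags
    //
    // The loader recovers each number from a BTF array dimension and the
    // key/value layouts from the pointed-to types; the null pointers stored
    // at runtime are never dereferenced.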

@@ -20,7 +20,7 @@ impl<T> PerCpuArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERCPU_ARRAY,
+                type_: BPF_MAP_TYPE_PERCPU_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -35,7 +35,7 @@ impl<T> PerCpuArray<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
         PerCpuArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERCPU_ARRAY,
+                type_: BPF_MAP_TYPE_PERCPU_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,

@@ -23,7 +23,7 @@ impl<T> PerfEventArray<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -38,7 +38,7 @@ impl<T> PerfEventArray<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventArray<T> {
         PerfEventArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,

@@ -22,7 +22,7 @@ impl PerfEventByteArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -36,7 +36,7 @@ impl PerfEventByteArray {
     pub const fn pinned(max_entries: u32, flags: u32) -> PerfEventByteArray {
         PerfEventByteArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+                type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,

@@ -42,7 +42,7 @@ impl ProgramArray {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PROG_ARRAY,
+                type_: BPF_MAP_TYPE_PROG_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -56,7 +56,7 @@ impl ProgramArray {
     pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
         ProgramArray {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_PROG_ARRAY,
+                type_: BPF_MAP_TYPE_PROG_ARRAY as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,

@@ -18,7 +18,7 @@ impl<T> Queue<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_QUEUE,
+                type_: BPF_MAP_TYPE_QUEUE as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -33,7 +33,7 @@ impl<T> Queue<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
         Queue {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_QUEUE,
+                type_: BPF_MAP_TYPE_QUEUE as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,

@@ -25,7 +25,7 @@ impl<K> SockHash<K> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKHASH,
+                type_: BPF_MAP_TYPE_SOCKHASH as u32,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -40,7 +40,7 @@ impl<K> SockHash<K> {
     pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
         SockHash {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKHASH,
+                type_: BPF_MAP_TYPE_SOCKHASH as u32,
                 key_size: mem::size_of::<K>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,

@@ -24,7 +24,7 @@ impl SockMap {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKMAP,
+                type_: BPF_MAP_TYPE_SOCKMAP as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,
@@ -38,7 +38,7 @@ impl SockMap {
     pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
         SockMap {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_SOCKMAP,
+                type_: BPF_MAP_TYPE_SOCKMAP as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u32>() as u32,
                 max_entries,

@@ -16,7 +16,7 @@ impl<T> Stack<T> {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> Stack<T> {
         Stack {
             def: bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK,
+                type_: BPF_MAP_TYPE_STACK as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,
@@ -31,7 +31,7 @@ impl<T> Stack<T> {
     pub const fn pinned(max_entries: u32, flags: u32) -> Stack<T> {
         Stack {
             def: bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK,
+                type_: BPF_MAP_TYPE_STACK as u32,
                 key_size: 0,
                 value_size: mem::size_of::<T>() as u32,
                 max_entries,

@@ -20,7 +20,7 @@ impl StackTrace {
     pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK_TRACE,
+                type_: BPF_MAP_TYPE_STACK_TRACE as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
                 max_entries,
@@ -34,7 +34,7 @@ impl StackTrace {
     pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
         StackTrace {
             def: UnsafeCell::new(bpf_map_def {
-                type_: BPF_MAP_TYPE_STACK_TRACE,
+                type_: BPF_MAP_TYPE_STACK_TRACE as u32,
                 key_size: mem::size_of::<u32>() as u32,
                 value_size: mem::size_of::<u64>() as u32 * PERF_MAX_STACK_DEPTH,
                 max_entries,
