From 974386e4bcb61d9aa7c2592b79d075da16c68201 Mon Sep 17 00:00:00 2001
From: Michal R
Date: Mon, 29 Sep 2025 08:20:58 +0200
Subject: [PATCH] aya-ebpf: Reduce repetition and improve documentation of
 `maps::hash_map`

* The methods of all structs are almost identical; use a macro to reduce
  code repetition.
* Use the third person in all docstrings.
* Make use of `#[doc]` and split out the most repetitive chunks into
  separate files.
* Make the `Safety` comment for `get*` operations more clear, provide
  context and links.
---
 ebpf/aya-ebpf/src/maps/docs/hash_map.md       |   9 +
 .../src/maps/docs/hash_map_examples.md        |  43 ++
 ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md   |  10 +
 .../src/maps/docs/lru_hash_map_examples.md    |  44 ++
 .../src/maps/docs/lru_per_cpu_hash_map.md     |  10 +
 .../docs/lru_per_cpu_hash_map_examples.md     |  44 ++
 .../src/maps/docs/per_cpu_hash_map.md         |  32 ++
 .../maps/docs/per_cpu_hash_map_examples.md    |  43 ++
 ebpf/aya-ebpf/src/maps/hash_map.rs            | 425 +++++-------------
 ebpf/aya-ebpf/src/maps/map_safety.md          |  51 ++
 xtask/public-api/aya-ebpf.txt                 |   8 +-
 11 files changed, 397 insertions(+), 322 deletions(-)
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/hash_map.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/hash_map_examples.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_hash_map_examples.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map_examples.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md
 create mode 100644 ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map_examples.md
 create mode 100644 ebpf/aya-ebpf/src/maps/map_safety.md

diff --git a/ebpf/aya-ebpf/src/maps/docs/hash_map.md b/ebpf/aya-ebpf/src/maps/docs/hash_map.md
new file mode 100644
index 00000000..e22a9aa3
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/hash_map.md
@@ -0,0 +1,9 @@
+
+
+A hash map that can be shared between eBPF programs and user space.
+
+# Minimum kernel version
+
+The minimum kernel version required to use this feature is 3.19.
diff --git a/ebpf/aya-ebpf/src/maps/docs/hash_map_examples.md b/ebpf/aya-ebpf/src/maps/docs/hash_map_examples.md
new file mode 100644
index 00000000..884ef585
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/hash_map_examples.md
@@ -0,0 +1,43 @@
+# Examples
+
+```rust,no_run
+use aya_ebpf::{
+    maps::HashMap,
+    macros::{map, tracepoint},
+    programs::TracePointContext,
+    EbpfContext as _,
+};
+
+/// A hash map that counts syscalls issued by different processes.
+#[map]
+static COUNTER: HashMap<
+    // PID.
+    u32,
+    // Count of syscalls issued by the given process.
+    u32,
+> = HashMap::with_max_entries(
+    // Maximum number of elements. Once full, inserts fail with an error.
+    10,
+    // Optional flags.
+    0,
+);
+
+/// A simple program attached to the `sys_enter` tracepoint that counts
+/// syscalls.
+#[tracepoint]
+fn sys_enter(ctx: TracePointContext) {
+    let pid = ctx.pid();
+
+    if let Some(count) = COUNTER.get_ptr_mut(pid) {
+        unsafe { *count += 1 };
+    } else {
+        let _ = COUNTER.insert(
+            pid,
+            // New value.
+            1,
+            // Optional flags.
+            0,
+        );
+    }
+}
+```
diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md
new file mode 100644
index 00000000..a486d8f4
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md
@@ -0,0 +1,10 @@
+
+
+An LRU hash map that can be shared between eBPF programs and user space.
+When it reaches its capacity, the least recently used element is evicted.
+
+# Minimum kernel version
+
+The minimum kernel version required to use this feature is 4.10.
diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_hash_map_examples.md b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map_examples.md
new file mode 100644
index 00000000..8e2c0708
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map_examples.md
@@ -0,0 +1,44 @@
+# Examples
+
+```rust,no_run
+use aya_ebpf::{
+    maps::LruHashMap,
+    macros::{map, tracepoint},
+    programs::TracePointContext,
+    EbpfContext as _,
+};
+
+/// A hash map that counts syscalls issued by different processes.
+#[map]
+static COUNTER: LruHashMap<
+    // PID.
+    u32,
+    // Count of syscalls issued by the given process.
+    u32,
+> = LruHashMap::with_max_entries(
+    // Maximum number of elements. Reaching this capacity triggers eviction of
+    // the least recently used elements.
+    10,
+    // Optional flags.
+    0,
+);
+
+/// A simple program attached to the `sys_enter` tracepoint that counts
+/// syscalls.
+#[tracepoint]
+fn sys_enter(ctx: TracePointContext) {
+    let pid = ctx.pid();
+
+    if let Some(count) = COUNTER.get_ptr_mut(pid) {
+        unsafe { *count += 1 };
+    } else {
+        let _ = COUNTER.insert(
+            pid,
+            // New value.
+            1,
+            // Optional flags.
+            0,
+        );
+    }
+}
+```
diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md
new file mode 100644
index 00000000..1ddebdbb
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md
@@ -0,0 +1,10 @@
+
+
+Similar to [`LruHashMap`] but each CPU holds a separate value for a given
+key. Typically used to minimize lock contention in eBPF programs.
+
+# Minimum kernel version
+
+The minimum kernel version required to use this feature is 4.10.
diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map_examples.md b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map_examples.md
new file mode 100644
index 00000000..0c05e3c1
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map_examples.md
@@ -0,0 +1,44 @@
+# Examples
+
+```rust,no_run
+use aya_ebpf::{
+    maps::LruPerCpuHashMap,
+    macros::{map, tracepoint},
+    programs::TracePointContext,
+    EbpfContext as _,
+};
+
+/// A hash map that counts syscalls issued by different processes.
+#[map]
+static COUNTER: LruPerCpuHashMap<
+    // PID.
+    u32,
+    // Count of syscalls issued by the given process.
+    u32,
+> = LruPerCpuHashMap::with_max_entries(
+    // Maximum number of elements. Reaching this capacity triggers eviction of
+    // the least recently used elements.
+    10,
+    // Optional flags.
+    0,
+);
+
+/// A simple program attached to the `sys_enter` tracepoint that counts
+/// syscalls.
+#[tracepoint]
+fn sys_enter(ctx: TracePointContext) {
+    let pid = ctx.pid();
+
+    if let Some(count) = COUNTER.get_ptr_mut(pid) {
+        unsafe { *count += 1 };
+    } else {
+        let _ = COUNTER.insert(
+            pid,
+            // New value.
+            1,
+            // Optional flags.
+            0,
+        );
+    }
+}
+```
diff --git a/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md
new file mode 100644
index 00000000..33e62080
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md
@@ -0,0 +1,32 @@
+
+
+Similar to [`HashMap`] but each CPU holds a separate value for a given key.
+Typically used to minimize lock contention in eBPF programs.
+
+# Minimum kernel version
+
+The minimum kernel version required to use this feature is 4.6.
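+
+Values of a per-CPU map can be read from user space with the `aya` crate,
+which returns one value per possible CPU. The following is a minimal,
+hypothetical sketch (it assumes a map named `COUNTER`, as in the example
+below, and elides error handling):
+
+```rust,ignore
+use aya::{Ebpf, maps::PerCpuHashMap};
+
+fn dump_counters(ebpf: &Ebpf) -> Result<(), aya::maps::MapError> {
+    // One `u32` slot per possible CPU for each key.
+    let counters: PerCpuHashMap<_, u32, u32> =
+        PerCpuHashMap::try_from(ebpf.map("COUNTER").unwrap())?;
+    for entry in counters.iter() {
+        let (pid, values) = entry?;
+        // Sum the per-CPU slots to get the process-wide total.
+        let total: u32 = values.iter().sum();
+        println!("pid {pid}: {total} syscalls");
+    }
+    Ok(())
+}
+```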
diff --git a/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map_examples.md b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map_examples.md
new file mode 100644
index 00000000..52ffad53
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map_examples.md
@@ -0,0 +1,43 @@
+# Examples
+
+```rust,no_run
+use aya_ebpf::{
+    maps::PerCpuHashMap,
+    macros::{map, tracepoint},
+    programs::TracePointContext,
+    EbpfContext as _,
+};
+
+/// A hash map that counts syscalls issued by different processes.
+#[map]
+static COUNTER: PerCpuHashMap<
+    // PID.
+    u32,
+    // Count of syscalls issued by the given process.
+    u32,
+> = PerCpuHashMap::with_max_entries(
+    // Maximum number of elements. Once full, inserts fail with an error.
+    10,
+    // Optional flags.
+    0,
+);
+
+/// A simple program attached to the `sys_enter` tracepoint that counts
+/// syscalls.
+#[tracepoint]
+fn sys_enter(ctx: TracePointContext) {
+    let pid = ctx.pid();
+
+    if let Some(count) = COUNTER.get_ptr_mut(pid) {
+        unsafe { *count += 1 };
+    } else {
+        let _ = COUNTER.insert(
+            pid,
+            // New value.
+            1,
+            // Optional flags.
+            0,
+        );
+    }
+}
+```
diff --git a/ebpf/aya-ebpf/src/maps/hash_map.rs b/ebpf/aya-ebpf/src/maps/hash_map.rs
index 1b34ac03..b839118c 100644
--- a/ebpf/aya-ebpf/src/maps/hash_map.rs
+++ b/ebpf/aya-ebpf/src/maps/hash_map.rs
@@ -12,329 +12,118 @@ use crate::{
     remove,
 };
 
-#[repr(transparent)]
-pub struct HashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K, V> Sync for HashMap<K, V> {}
-
-impl<K, V> HashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct LruHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K, V> Sync for LruHashMap<K, V> {}
-
-impl<K, V> LruHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct PerCpuHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for PerCpuHashMap<K, V> {}
-
-impl<K, V> PerCpuHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct LruPerCpuHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for LruPerCpuHashMap<K, V> {}
-
-impl<K, V> LruPerCpuHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
+/// Generates a hash map definition with common methods.
+///
+/// Parameters:
+/// - `$map_doc`, `$map_doc_examples`: paths (relative to this file) of the
+///   markdown files included via `#[doc = include_str!(...)]`.
+/// - `$name`: the name of the generated map struct.
+/// - `$t`: the `bpf_map_type` constant used in the map definition.
+macro_rules! hash_map {
+    (
+        $map_doc:literal,
+        $map_doc_examples:literal,
+        $name:ident,
+        $t:ident
+        $(,)?
+    ) => {
+        #[doc = include_str!($map_doc)]
+        #[doc = include_str!($map_doc_examples)]
+        #[repr(transparent)]
+        pub struct $name<K, V> {
+            def: UnsafeCell<bpf_map_def>,
+            _k: PhantomData<K>,
+            _v: PhantomData<V>,
+        }
+
+        unsafe impl<K, V> Sync for $name<K, V> {}
+
+        impl<K, V> $name<K, V> {
+            pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
+                Self {
+                    def: UnsafeCell::new(build_def::<K, V>(
+                        $t,
+                        max_entries,
+                        flags,
+                        PinningType::None,
+                    )),
+                    _k: PhantomData,
+                    _v: PhantomData,
+                }
+            }
+
+            pub const fn pinned(max_entries: u32, flags: u32) -> Self {
+                Self {
+                    def: UnsafeCell::new(build_def::<K, V>(
+                        $t,
+                        max_entries,
+                        flags,
+                        PinningType::ByName,
+                    )),
+                    _k: PhantomData,
+                    _v: PhantomData,
+                }
+            }
+
+            #[doc = "Retrieves the value associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
+                unsafe { get(self.def.get(), key.borrow()) }
+            }
+
+            #[doc = "Retrieves the pointer associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
+                get_ptr(self.def.get(), key.borrow())
+            }
+
+            #[doc = "Retrieves the mutable pointer associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
+                get_ptr_mut(self.def.get(), key.borrow())
+            }
+
+            /// Inserts a key-value pair into the map.
+            #[inline]
+            pub fn insert(
+                &self,
+                key: impl Borrow<K>,
+                value: impl Borrow<V>,
+                flags: u64,
+            ) -> Result<(), c_long> {
+                insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
+            }
+
+            /// Removes a key from the map.
+            #[inline]
+            pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
+                remove(self.def.get().cast(), key.borrow())
+            }
+        }
+    };
+}
+
+hash_map!(
+    "docs/hash_map.md",
+    "docs/hash_map_examples.md",
+    HashMap,
+    BPF_MAP_TYPE_HASH,
+);
+hash_map!(
+    "docs/lru_hash_map.md",
+    "docs/lru_hash_map_examples.md",
+    LruHashMap,
+    BPF_MAP_TYPE_LRU_HASH,
+);
+hash_map!(
+    "docs/per_cpu_hash_map.md",
+    "docs/per_cpu_hash_map_examples.md",
+    PerCpuHashMap,
+    BPF_MAP_TYPE_PERCPU_HASH,
+);
+hash_map!(
+    "docs/lru_per_cpu_hash_map.md",
+    "docs/lru_per_cpu_hash_map_examples.md",
+    LruPerCpuHashMap,
+    BPF_MAP_TYPE_LRU_PERCPU_HASH,
+);
 
 const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
     bpf_map_def {
diff --git a/ebpf/aya-ebpf/src/maps/map_safety.md b/ebpf/aya-ebpf/src/maps/map_safety.md
new file mode 100644
index 00000000..68214e53
--- /dev/null
+++ b/ebpf/aya-ebpf/src/maps/map_safety.md
@@ -0,0 +1,51 @@
+# Safety
+
+The pointer returned by a BPF map lookup is only stable until a concurrent
+update or delete. In the kernel’s default *preallocated* mode (no
+`BPF_F_NO_PREALLOC`), deleted elements are immediately recycled onto a
+per-CPU freelist and may be reused by another update before an RCU grace
+period elapses. Readers can therefore observe aliasing (values changing
+underneath them) or, in rare cases, false-positive lookups when an old and
+new key overlap. This behavior was reported on [LKML in 2018][lkml-2018].
+
+Using `BPF_F_NO_PREALLOC` historically forced RCU-delayed freeing, but since
+the switch to `bpf_mem_alloc`, both prealloc and no-prealloc modes may
+recycle elements quickly; the main distinction now is
+[memory vs. allocation overhead][htab-atomic-overwrite].
+
+The [official kernel docs][kernel-doc-map-hash] describe `BPF_F_NO_PREALLOC`
+as a *memory-usage knob*, not a safety guarantee.
+
+Patches in 2020 mitigated some issues (e.g.
+[zero-filling reused per-CPU slots][zero-filling]) but did not eliminate reuse
+races.
+
+A 2023 patch by Alexei Starovoitov proposed a fallback scheme to
+[delay reuse via RCU grace periods in certain conditions][reuse-delay]
+(rather than always reusing immediately). However, this approach is not
+universally applied, and immediate reuse is still considered a “known quirk” in many cases.
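+
+In practice, treat every lookup result as ephemeral: dereference and copy
+the value out instead of holding the reference across other map operations.
+A minimal sketch of the pattern (the `SETTINGS` map here is hypothetical,
+for illustration only):
+
+```rust,no_run
+use aya_ebpf::{macros::map, maps::HashMap};
+
+#[map]
+static SETTINGS: HashMap<u32, u64> = HashMap::with_max_entries(16, 0);
+
+fn read_setting(key: u32) -> Option<u64> {
+    // SAFETY: the reference returned by `get` is only stable until a
+    // concurrent update or delete, so copy the value out immediately.
+    let value = unsafe { SETTINGS.get(key) }?;
+    Some(*value)
+}
+```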
+
+[lkml-2018]: https://lore.kernel.org/lkml/CAG48ez1-WZH55+Wa2vgwZY_hpZJfnDxMzxGLtuN1hG1z6hKf5Q@mail.gmail.com/T/
+[htab-atomic-overwrite]: https://lore.kernel.org/bpf/20250204082848.13471-2-hotforest@gmail.com/T/
+[kernel-doc-map-hash]: https://www.kernel.org/doc/html/v6.10/bpf/map_hash.html
+[zero-filling]: https://lore.kernel.org/all/20201104112332.15191-1-david.verbeiren@tessares.net/
+[reuse-delay]: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com/
diff --git a/xtask/public-api/aya-ebpf.txt b/xtask/public-api/aya-ebpf.txt
index ea62c132..74317218 100644
--- a/xtask/public-api/aya-ebpf.txt
+++ b/xtask/public-api/aya-ebpf.txt
@@ -377,7 +377,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
+impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -408,7 +408,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
+impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -1202,7 +1202,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
+impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -1262,7 +1262,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
+impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin