From 3a7d1bebb45fb8fdb7117592524921285c94595c Mon Sep 17 00:00:00 2001 From: Michal R Date: Mon, 29 Sep 2025 08:20:58 +0200 Subject: [PATCH] aya-ebpf: Reduce repetition and improve documentation of `maps::hash_map` * The methods of all structs are almost identical, use a macro to reduce code repetition. * Use the third person in all docstrings. * Make use of `#[doc]` and split out the most repetitive chunks into separate files. * Make the `Safety` comment for `get*` operations more clear, provide context and links. --- ebpf/aya-ebpf/src/maps/docs/hash_map.md | 9 + ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md | 10 + .../src/maps/docs/lru_per_cpu_hash_map.md | 10 + .../src/maps/docs/per_cpu_hash_map.md | 10 + ebpf/aya-ebpf/src/maps/hash_map.rs | 583 +++++++++--------- ebpf/aya-ebpf/src/maps/map_safety.md | 32 + xtask/public-api/aya-ebpf.txt | 8 +- 7 files changed, 352 insertions(+), 310 deletions(-) create mode 100644 ebpf/aya-ebpf/src/maps/docs/hash_map.md create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md create mode 100644 ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md create mode 100644 ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md create mode 100644 ebpf/aya-ebpf/src/maps/map_safety.md diff --git a/ebpf/aya-ebpf/src/maps/docs/hash_map.md b/ebpf/aya-ebpf/src/maps/docs/hash_map.md new file mode 100644 index 00000000..f68cde04 --- /dev/null +++ b/ebpf/aya-ebpf/src/maps/docs/hash_map.md @@ -0,0 +1,9 @@ + + + +A hash map that can be shared between eBPF programs and user-space. + +# Minimum kernel version + +The minimum kernel version required to use this feature is 3.19. diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md new file mode 100644 index 00000000..a9bbd604 --- /dev/null +++ b/ebpf/aya-ebpf/src/maps/docs/lru_hash_map.md @@ -0,0 +1,10 @@ + + + +An LRU hash map that can be shared between eBPF programs and user-space. +When it reaches the capacity `M`, the least recently used element is evicted.
+ +# Minimum kernel version + +The minimum kernel version required to use this feature is 4.10. diff --git a/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md new file mode 100644 index 00000000..1ddebdbb --- /dev/null +++ b/ebpf/aya-ebpf/src/maps/docs/lru_per_cpu_hash_map.md @@ -0,0 +1,10 @@ + + + +Similar to [`LruHashMap`] but each CPU holds a separate value for a given +key. Typically used to minimize lock contention in eBPF programs. + +# Minimum kernel version + +The minimum kernel version required to use this feature is 4.10. diff --git a/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md new file mode 100644 index 00000000..33e62080 --- /dev/null +++ b/ebpf/aya-ebpf/src/maps/docs/per_cpu_hash_map.md @@ -0,0 +1,10 @@ + + + +Similar to [`HashMap`] but each CPU holds a separate value for a given key. +Typically used to minimize lock contention in eBPF programs. + +# Minimum kernel version + +The minimum kernel version required to use this feature is 4.6. diff --git a/ebpf/aya-ebpf/src/maps/hash_map.rs b/ebpf/aya-ebpf/src/maps/hash_map.rs index 1b34ac03..9a2e6c07 100644 --- a/ebpf/aya-ebpf/src/maps/hash_map.rs +++ b/ebpf/aya-ebpf/src/maps/hash_map.rs @@ -1,3 +1,7 @@ +//! Hash map types that can be shared between eBPF programs and user-space. 
+ +#![deny(missing_docs)] + use core::{borrow::Borrow, cell::UnsafeCell, marker::PhantomData, mem}; use aya_ebpf_bindings::bindings::bpf_map_type::{ @@ -12,329 +16,296 @@ use crate::{ remove, }; -#[repr(transparent)] -pub struct HashMap { - def: UnsafeCell, - _k: PhantomData, - _v: PhantomData, -} - -unsafe impl Sync for HashMap {} - -impl HashMap { - pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_HASH, - max_entries, - flags, - PinningType::None, - )), - _k: PhantomData, - _v: PhantomData, +/// Generates a hash map definition with common methods. +macro_rules! hash_map { + ( + $map_doc:literal, + $map_doc_examples:literal, + $name:ident, + $t:ident + $(,)? + ) => { + #[doc = include_str!($map_doc)] + #[doc = $map_doc_examples] + #[repr(transparent)] + pub struct $name { + def: UnsafeCell, + _k: PhantomData, + _v: PhantomData, } - } - pub const fn pinned(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_HASH, - max_entries, - flags, - PinningType::ByName, - )), - _k: PhantomData, - _v: PhantomData, + unsafe impl Sync for $name {} + + impl $name { + /// Creates a new map with the given `max_entries` and `flags`. + pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self { + Self { + def: UnsafeCell::new(build_def::( + $t, + max_entries, + flags, + PinningType::None, + )), + _k: PhantomData, + _v: PhantomData, + } + } + + /// Creates a new map with the given `max_entries` and `flags` that + /// is pinned in the BPF filesystem in the directory designated by + /// [`EbpfLoader::map_pin_path`][map-pin-path]. 
+ /// + /// [map-pin-path]: https://docs.rs/aya/latest/aya/struct.EbpfLoader.html#method.map_pin_path + pub const fn pinned(max_entries: u32, flags: u32) -> Self { + Self { + def: UnsafeCell::new(build_def::( + $t, + max_entries, + flags, + PinningType::ByName, + )), + _k: PhantomData, + _v: PhantomData, + } + } + + #[doc = "Retrieves the value associated with `key` from the map."] + #[doc = include_str!("map_safety.md")] + #[inline] + pub unsafe fn get(&self, key: impl Borrow) -> Option<&V> { + unsafe { get(self.def.get(), key.borrow()) } + } + + #[doc = "Retrieves the pointer associated with `key` from the map."] + #[doc = include_str!("map_safety.md")] + #[inline] + pub fn get_ptr(&self, key: impl Borrow) -> Option<*const V> { + get_ptr(self.def.get(), key.borrow()) + } + + #[doc = "Retrieves the mutable pointer associated with `key` from the map."] + #[doc = include_str!("map_safety.md")] + #[inline] + pub fn get_ptr_mut(&self, key: impl Borrow) -> Option<*mut V> { + get_ptr_mut(self.def.get(), key.borrow()) + } + + /// Inserts a key-value pair into the map. + #[inline] + pub fn insert( + &self, + key: impl Borrow, + value: impl Borrow, + flags: u64, + ) -> Result<(), c_long> { + insert(self.def.get().cast(), key.borrow(), value.borrow(), flags) + } + + /// Removes a key from the map. + #[inline] + pub fn remove(&self, key: impl Borrow) -> Result<(), c_long> { + remove(self.def.get().cast(), key.borrow()) + } } - } - - /// Retrieve the value associate with `key` from the map. - /// - /// # Safety - /// - /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity - /// of `insert` or `remove`, and any element removed from the map might get aliased by another - /// element in the map, causing garbage to be read, or corruption in case of writes.
- #[inline] - pub unsafe fn get(&self, key: impl Borrow) -> Option<&V> { - unsafe { get(self.def.get(), key.borrow()) } - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller - /// to decide whether it's safe to dereference the pointer or not. - #[inline] - pub fn get_ptr(&self, key: impl Borrow) -> Option<*const V> { - get_ptr(self.def.get(), key.borrow()) - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, and additionally cares should be taken to avoid - /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the - /// pointer or not. - #[inline] - pub fn get_ptr_mut(&self, key: impl Borrow) -> Option<*mut V> { - get_ptr_mut(self.def.get(), key.borrow()) - } - - #[inline] - pub fn insert( - &self, - key: impl Borrow, - value: impl Borrow, - flags: u64, - ) -> Result<(), c_long> { - insert(self.def.get().cast(), key.borrow(), value.borrow(), flags) - } - - #[inline] - pub fn remove(&self, key: impl Borrow) -> Result<(), c_long> { - remove(self.def.get().cast(), key.borrow()) - } + }; } -#[repr(transparent)] -pub struct LruHashMap { - def: UnsafeCell, - _k: PhantomData, - _v: PhantomData, -} +hash_map!( + "docs/hash_map.md", + r#"# Examples -unsafe impl Sync for LruHashMap {} - -impl LruHashMap { - pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_LRU_HASH, - max_entries, - flags, - PinningType::None, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - pub const fn pinned(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_LRU_HASH, - max_entries, - flags, - PinningType::ByName, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - /// Retrieve the value associate with `key` from the map. 
- /// - /// # Safety - /// - /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity - /// of `insert` or `remove`, and any element removed from the map might get aliased by another - /// element in the map, causing garbage to be read, or corruption in case of writes. - #[inline] - pub unsafe fn get(&self, key: impl Borrow) -> Option<&V> { - unsafe { get(self.def.get(), key.borrow()) } - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller - /// to decide whether it's safe to dereference the pointer or not. - #[inline] - pub fn get_ptr(&self, key: impl Borrow) -> Option<*const V> { - get_ptr(self.def.get(), key.borrow()) - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, and additionally cares should be taken to avoid - /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the - /// pointer or not. - #[inline] - pub fn get_ptr_mut(&self, key: impl Borrow) -> Option<*mut V> { - get_ptr_mut(self.def.get(), key.borrow()) - } - - #[inline] - pub fn insert( - &self, - key: impl Borrow, - value: impl Borrow, - flags: u64, - ) -> Result<(), c_long> { - insert(self.def.get().cast(), key.borrow(), value.borrow(), flags) - } +```rust,no_run +use aya_ebpf::{ + maps::HashMap, + macros::{map, tracepoint}, + programs::TracePointContext, + EbpfContext as _, +}; - #[inline] - pub fn remove(&self, key: impl Borrow) -> Result<(), c_long> { - remove(self.def.get().cast(), key.borrow()) +/// A hash map that counts syscalls issued by different processes. +#[map] +static COUNTER: HashMap< + // PID. + u32, + // Count of syscalls issued by the given process. + u32 +> = HashMap::with_max_entries( + // Maximum number of elements. Reaching this capacity triggers an error. + 10, + // Optional flags. 
+ 0 +); + +/// A simple program attached to the `sys_enter` tracepoint that counts +/// syscalls. +#[tracepoint] +fn sys_enter(ctx: TracePointContext) { + let pid = ctx.pid(); + + if let Some(mut count) = COUNTER.get_ptr_mut(pid) { + unsafe { *count += 1 }; + } else { + COUNTER.insert( + pid, + // New value. + 1, + // Optional flags. + 0 + ); } } +```"#, + HashMap, + BPF_MAP_TYPE_HASH, +); +hash_map!( + "docs/lru_hash_map.md", + r#"# Examples + +```rust,no_run +use aya_ebpf::{ + maps::LruHashMap, + macros::{map, tracepoint}, + programs::TracePointContext, + EbpfContext as _, +}; -#[repr(transparent)] -pub struct PerCpuHashMap { - def: UnsafeCell, - _k: PhantomData, - _v: PhantomData, -} - -unsafe impl Sync for PerCpuHashMap {} - -impl PerCpuHashMap { - pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_PERCPU_HASH, - max_entries, - flags, - PinningType::None, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - pub const fn pinned(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_PERCPU_HASH, - max_entries, - flags, - PinningType::ByName, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - /// Retrieve the value associate with `key` from the map. - /// - /// # Safety - /// - /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity - /// of `insert` or `remove`, and any element removed from the map might get aliased by another - /// element in the map, causing garbage to be read, or corruption in case of writes. - #[inline] - pub unsafe fn get(&self, key: impl Borrow) -> Option<&V> { - unsafe { get(self.def.get(), key.borrow()) } - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller - /// to decide whether it's safe to dereference the pointer or not. 
- #[inline] - pub fn get_ptr(&self, key: impl Borrow) -> Option<*const V> { - get_ptr(self.def.get(), key.borrow()) - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, and additionally cares should be taken to avoid - /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the - /// pointer or not. - #[inline] - pub fn get_ptr_mut(&self, key: impl Borrow) -> Option<*mut V> { - get_ptr_mut(self.def.get(), key.borrow()) - } - - #[inline] - pub fn insert( - &self, - key: impl Borrow, - value: impl Borrow, - flags: u64, - ) -> Result<(), c_long> { - insert(self.def.get().cast(), key.borrow(), value.borrow(), flags) - } - - #[inline] - pub fn remove(&self, key: impl Borrow) -> Result<(), c_long> { - remove(self.def.get().cast(), key.borrow()) +/// A hash map that counts syscalls issued by different processes. +#[map] +static COUNTER: LruHashMap< + // PID. + u32, + // Count of syscalls issued by the given process. + u32, + +> = LruHashMap::with_max_entries( + // Maximum number of elements. Reaching this capacity triggers eviction of + // the least used elements. + 10, + // Optional flags. + 0 +); + +/// A simple program attached to the `sys_enter` tracepoint that counts +/// syscalls. +#[tracepoint] +fn sys_enter(ctx: TracePointContext) { + let pid = ctx.pid(); + + if let Some(mut count) = COUNTER.get_ptr_mut(pid) { + unsafe { *count += 1 }; + } else { + COUNTER.insert( + pid, + // New value. + 1, + // Optional flags. 
+ 0 + ); } } +```"#, + LruHashMap, + BPF_MAP_TYPE_LRU_HASH, +); +hash_map!( + "docs/per_cpu_hash_map.md", + r#"# Examples + +```rust,no_run +use aya_ebpf::{ + maps::PerCpuHashMap, + macros::{map, tracepoint}, + programs::TracePointContext, + EbpfContext as _, +}; -#[repr(transparent)] -pub struct LruPerCpuHashMap { - def: UnsafeCell, - _k: PhantomData, - _v: PhantomData, -} - -unsafe impl Sync for LruPerCpuHashMap {} - -impl LruPerCpuHashMap { - pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_LRU_PERCPU_HASH, - max_entries, - flags, - PinningType::None, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - pub const fn pinned(max_entries: u32, flags: u32) -> Self { - Self { - def: UnsafeCell::new(build_def::( - BPF_MAP_TYPE_LRU_PERCPU_HASH, - max_entries, - flags, - PinningType::ByName, - )), - _k: PhantomData, - _v: PhantomData, - } - } - - /// Retrieve the value associate with `key` from the map. - /// - /// # Safety - /// - /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity - /// of `insert` or `remove`, and any element removed from the map might get aliased by another - /// element in the map, causing garbage to be read, or corruption in case of writes. - #[inline] - pub unsafe fn get(&self, key: impl Borrow) -> Option<&V> { - unsafe { get(self.def.get(), key.borrow()) } - } - - /// Retrieve the value associate with `key` from the map. - /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller - /// to decide whether it's safe to dereference the pointer or not. - #[inline] - pub fn get_ptr(&self, key: impl Borrow) -> Option<*const V> { - get_ptr(self.def.get(), key.borrow()) - } - - /// Retrieve the value associate with `key` from the map. 
- /// The same caveat as `get` applies, and additionally cares should be taken to avoid - /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the - /// pointer or not. - #[inline] - pub fn get_ptr_mut(&self, key: impl Borrow) -> Option<*mut V> { - get_ptr_mut(self.def.get(), key.borrow()) - } - - #[inline] - pub fn insert( - &self, - key: impl Borrow, - value: impl Borrow, - flags: u64, - ) -> Result<(), c_long> { - insert(self.def.get().cast(), key.borrow(), value.borrow(), flags) +/// A hash map that counts syscalls issued by different processes. +#[map] +static COUNTER: PerCpuHashMap< + // PID. + u32, + // Count of syscalls issued by the given process. + u32 +> = PerCpuHashMap::with_max_entries( + // Maximum number of elements. Reaching this capacity triggers an error. + 10, + // Optional flags. + 0 +); + +/// A simple program attached to the `sys_enter` tracepoint that counts +/// syscalls. +#[tracepoint] +fn sys_enter(ctx: TracePointContext) { + let pid = ctx.pid(); + + if let Some(mut count) = COUNTER.get_ptr_mut(pid) { + unsafe { *count += 1 }; + } else { + COUNTER.insert( + pid, + // New value. + 1, + // Optional flags. + 0 + ); } +} +```"#, + PerCpuHashMap, + BPF_MAP_TYPE_PERCPU_HASH +); +hash_map!( + "docs/lru_per_cpu_hash_map.md", + r#"# Examples + +```rust,no_run +use aya_ebpf::{ + maps::LruPerCpuHashMap, + macros::{map, tracepoint}, + programs::TracePointContext, + EbpfContext as _, +}; - #[inline] - pub fn remove(&self, key: impl Borrow) -> Result<(), c_long> { - remove(self.def.get().cast(), key.borrow()) +/// A hash map that counts syscalls issued by different processes. +#[map] +static COUNTER: LruPerCpuHashMap< + // PID. + u32, + // Count of syscalls issued by the given process. + u32, + +> = LruPerCpuHashMap::with_max_entries( + // Maximum number of elements. Reaching this capacity triggers eviction of + // the least used elements. + 10, + // Optional flags. 
+ 0 +); + +/// A simple program attached to the `sys_enter` tracepoint that counts +/// syscalls. +#[tracepoint] +fn sys_enter(ctx: TracePointContext) { + let pid = ctx.pid(); + + if let Some(mut count) = COUNTER.get_ptr_mut(pid) { + unsafe { *count += 1 }; + } else { + COUNTER.insert( + pid, + // New value. + 1, + // Optional flags. + 0 + ); } } +```"#, + LruPerCpuHashMap, + BPF_MAP_TYPE_LRU_PERCPU_HASH +); const fn build_def(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def { bpf_map_def { diff --git a/ebpf/aya-ebpf/src/maps/map_safety.md b/ebpf/aya-ebpf/src/maps/map_safety.md new file mode 100644 index 00000000..921a5215 --- /dev/null +++ b/ebpf/aya-ebpf/src/maps/map_safety.md @@ -0,0 +1,32 @@ +# Safety + +The pointer returned by a BPF map lookup is only stable until an update or +a delete. In the kernel’s default *preallocated* mode (no `BPF_F_NO_PREALLOC`), +deleted elements are immediately recycled onto a per-CPU freelist and may be +reused by another update before an RCU grace period elapses. Readers can +therefore observe aliasing (values changing underneath them) or, in rare cases, +false-positive lookups when an old and new key overlap. This behavior was +reported on [LKML in 2018][lkml-2018]. + +Using `BPF_F_NO_PREALLOC` historically forced RCU-delayed freeing, but since +the switch to `bpf_mem_alloc`, both prealloc and no-prealloc modes may recycle +elements quickly; the main distinction now is +[memory vs. allocation overhead][htab-atomic-overwrite]. + +The [official kernel docs][kernel-doc-map-hash] describe `BPF_F_NO_PREALLOC` as +a *memory-usage knob*, not a safety guarantee. + +Patches in 2020 mitigated some issues (e.g. +[zero-filling reused per-CPU slots][zero-filling]) but did not eliminate reuse +races. + +A 2023 patch by Alexei proposed a fallback scheme to +[delay reuse via RCU grace periods in certain conditions][reuse-delay] (rather +than always reusing immediately). 
However, this approach is not universally +applied, and immediate reuse is still considered a “known quirk” in many cases. + +[lkml-2018]: https://lore.kernel.org/lkml/CAG48ez1-WZH55+Wa2vgwZY_hpZJfnDxMzxGLtuN1hG1z6hKf5Q@mail.gmail.com/T/ +[htab-atomic-overwrite]: https://lore.kernel.org/bpf/20250204082848.13471-2-hotforest@gmail.com/T/ +[kernel-doc-map-hash]: https://www.kernel.org/doc/html/v6.10/bpf/map_hash.html +[zero-filling]: https://lore.kernel.org/all/20201104112332.15191-1-david.verbeiren@tessares.net/ +[reuse-delay]: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com/ diff --git a/xtask/public-api/aya-ebpf.txt b/xtask/public-api/aya-ebpf.txt index ea62c132..74317218 100644 --- a/xtask/public-api/aya-ebpf.txt +++ b/xtask/public-api/aya-ebpf.txt @@ -377,7 +377,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::insert(&self, key: impl pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::pinned(max_entries: u32, flags: u32) -> Self pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::remove(&self, key: impl core::borrow::Borrow) -> core::result::Result<(), aya_ebpf_cty::od::c_long> pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::with_max_entries(max_entries: u32, flags: u32) -> Self -impl core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap +impl core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap impl !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap impl core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap where K: core::marker::Send, V: core::marker::Send impl core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap where K: core::marker::Unpin, V: core::marker::Unpin @@ -408,7 +408,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap::insert(&self, key: impl co pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap::pinned(max_entries: u32, flags: u32) -> Self pub fn aya_ebpf::maps::hash_map::PerCpuHashMap::remove(&self, key: impl 
core::borrow::Borrow) -> core::result::Result<(), aya_ebpf_cty::od::c_long> pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap::with_max_entries(max_entries: u32, flags: u32) -> Self -impl core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap +impl core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap impl !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap impl core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap where K: core::marker::Send, V: core::marker::Send impl core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap where K: core::marker::Unpin, V: core::marker::Unpin @@ -1202,7 +1202,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::insert(&self, key: impl pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::pinned(max_entries: u32, flags: u32) -> Self pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::remove(&self, key: impl core::borrow::Borrow) -> core::result::Result<(), aya_ebpf_cty::od::c_long> pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap::with_max_entries(max_entries: u32, flags: u32) -> Self -impl core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap +impl core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap impl !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap impl core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap where K: core::marker::Send, V: core::marker::Send impl core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap where K: core::marker::Unpin, V: core::marker::Unpin @@ -1262,7 +1262,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap::insert(&self, key: impl co pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap::pinned(max_entries: u32, flags: u32) -> Self pub fn aya_ebpf::maps::hash_map::PerCpuHashMap::remove(&self, key: impl core::borrow::Borrow) -> core::result::Result<(), aya_ebpf_cty::od::c_long> pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap::with_max_entries(max_entries: u32, 
flags: u32) -> Self -impl core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap +impl core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap impl !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap impl core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap where K: core::marker::Send, V: core::marker::Send impl core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap where K: core::marker::Unpin, V: core::marker::Unpin