aya-ebpf: Reduce repetition and improve documentation of `maps::hash_map`

* The methods of all the structs are almost identical; use a macro to
  reduce code repetition.
* Use the third person in all docstrings.
* Make use of `#[doc]` and split the most repetitive chunks out into
  separate files (a sketch of the pattern follows below).
* Make the `Safety` comment for the `get*` operations clearer; provide
  context and links.
reviewable/pr1367/r9
Michal R 1 month ago
parent 03e8487177
commit 3a7d1bebb4

@ -0,0 +1,9 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
A hash map that can be shared between eBPF programs and user-space.
# Minimum kernel version
The minimum kernel version required to use this feature is 3.19.

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
An LRU hash map that can be shared between eBPF programs and user-space.
When it reaches the capacity `M`, the least recently used element is evicted.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.10.

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
Similar to [`LruHashMap`] but each CPU holds a separate value for a given
key. Typically used to minimize lock contention in eBPF programs.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.10.

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
Similar to [`HashMap`] but each CPU holds a separate value for a given key.
Typically used to minimize lock contention in eBPF programs.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.6.
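
These maps are shared with user-space through the `aya` crate. As an
illustrative user-space counterpart (not part of this commit), reading the
`COUNTER` map defined in the in-kernel examples below might look like the
following sketch; the object path is hypothetical and program attachment is
omitted:

```rust
use aya::{maps::HashMap, Ebpf};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load the compiled eBPF object; the path is hypothetical.
    let data = std::fs::read("target/bpfel-unknown-none/release/syscalls")?;
    let ebpf = Ebpf::load(&data)?;

    // Borrow the `COUNTER` map defined by the eBPF program and iterate over
    // the (pid, count) pairs recorded so far.
    let counters: HashMap<_, u32, u32> = HashMap::try_from(ebpf.map("COUNTER").unwrap())?;
    for entry in counters.iter() {
        let (pid, count) = entry?;
        println!("pid {pid}: {count} syscalls");
    }
    Ok(())
}
```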

@ -1,3 +1,7 @@
//! Hash map types that can be shared between eBPF programs and user-space.
#![deny(missing_docs)]
use core::{borrow::Borrow, cell::UnsafeCell, marker::PhantomData, mem};
use aya_ebpf_bindings::bindings::bpf_map_type::{
@ -12,329 +16,296 @@ use crate::{
remove,
};
/// Generates a hash map definition with common methods.
macro_rules! hash_map {
    (
        $map_doc:literal,
        $map_doc_examples:literal,
        $name:ident,
        $t:ident
        $(,)?
    ) => {
        #[doc = include_str!($map_doc)]
        #[doc = $map_doc_examples]
        #[repr(transparent)]
        pub struct $name<K, V> {
            def: UnsafeCell<bpf_map_def>,
            _k: PhantomData<K>,
            _v: PhantomData<V>,
        }

        unsafe impl<K: Sync, V: Sync> Sync for $name<K, V> {}

        impl<K, V> $name<K, V> {
            /// Creates a new map with the given `max_entries` and `flags`.
            pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
                Self {
                    def: UnsafeCell::new(build_def::<K, V>(
                        $t,
                        max_entries,
                        flags,
                        PinningType::None,
                    )),
                    _k: PhantomData,
                    _v: PhantomData,
                }
            }

            /// Creates a new map with the given `max_entries` and `flags` that
            /// is pinned in the BPF filesystem in the directory designated by
            /// [`EbpfLoader::map_pin_path`][map-pin-path].
            ///
            /// [map-pin-path]: https://docs.rs/aya/latest/aya/struct.EbpfLoader.html#method.map_pin_path
            pub const fn pinned(max_entries: u32, flags: u32) -> Self {
                Self {
                    def: UnsafeCell::new(build_def::<K, V>(
                        $t,
                        max_entries,
                        flags,
                        PinningType::ByName,
                    )),
                    _k: PhantomData,
                    _v: PhantomData,
                }
            }

            #[doc = "Retrieves the value associated with `key` from the map."]
            #[doc = include_str!("map_safety.md")]
            #[inline]
            pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
                unsafe { get(self.def.get(), key.borrow()) }
            }

            #[doc = "Retrieves the pointer associated with `key` from the map."]
            #[doc = include_str!("map_safety.md")]
            #[inline]
            pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
                get_ptr(self.def.get(), key.borrow())
            }

            #[doc = "Retrieves the mutable pointer associated with `key` from the map."]
            #[doc = include_str!("map_safety.md")]
            #[inline]
            pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
                get_ptr_mut(self.def.get(), key.borrow())
            }

            /// Inserts a key-value pair into the map.
            #[inline]
            pub fn insert(
                &self,
                key: impl Borrow<K>,
                value: impl Borrow<V>,
                flags: u64,
            ) -> Result<(), c_long> {
                insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
            }

            /// Removes a key from the map.
            #[inline]
            pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
                remove(self.def.get().cast(), key.borrow())
            }
        }
    };
}

hash_map!(
    "docs/hash_map.md",
    r#"# Examples

```rust,no_run
use aya_ebpf::{
    maps::HashMap,
    macros::{map, tracepoint},
    programs::TracePointContext,
    EbpfContext as _,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: HashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = HashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers an error.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```"#,
    HashMap,
    BPF_MAP_TYPE_HASH,
);

hash_map!(
    "docs/lru_hash_map.md",
    r#"# Examples

```rust,no_run
use aya_ebpf::{
    maps::LruHashMap,
    macros::{map, tracepoint},
    programs::TracePointContext,
    EbpfContext as _,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = LruHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers eviction of
    // the least recently used elements.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```"#,
    LruHashMap,
    BPF_MAP_TYPE_LRU_HASH,
);

hash_map!(
    "docs/per_cpu_hash_map.md",
    r#"# Examples

```rust,no_run
use aya_ebpf::{
    maps::PerCpuHashMap,
    macros::{map, tracepoint},
    programs::TracePointContext,
    EbpfContext as _,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: PerCpuHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = PerCpuHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers an error.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```"#,
    PerCpuHashMap,
    BPF_MAP_TYPE_PERCPU_HASH,
);

hash_map!(
    "docs/lru_per_cpu_hash_map.md",
    r#"# Examples

```rust,no_run
use aya_ebpf::{
    maps::LruPerCpuHashMap,
    macros::{map, tracepoint},
    programs::TracePointContext,
    EbpfContext as _,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruPerCpuHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = LruPerCpuHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers eviction of
    // the least recently used elements.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```"#,
    LruPerCpuHashMap,
    BPF_MAP_TYPE_LRU_PERCPU_HASH,
);
const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
bpf_map_def {

@ -0,0 +1,32 @@
# Safety

The pointer returned by a BPF map lookup is only stable until an update or a
delete. In the kernel's default *preallocated* mode (no `BPF_F_NO_PREALLOC`),
deleted elements are immediately recycled onto a per-CPU freelist and may be
reused by another update before an RCU grace period elapses. Readers can
therefore observe aliasing (values changing underneath them) or, in rare
cases, false-positive lookups when an old and a new key overlap. This behavior
was reported on [LKML in 2018][lkml-2018].

Using `BPF_F_NO_PREALLOC` historically forced RCU-delayed freeing, but since
the switch to `bpf_mem_alloc`, both the prealloc and no-prealloc modes may
recycle elements quickly; the main distinction now is
[memory vs. allocation overhead][htab-atomic-overwrite]. The
[official kernel docs][kernel-doc-map-hash] describe `BPF_F_NO_PREALLOC` as a
*memory-usage knob*, not a safety guarantee.

Patches in 2020 mitigated some issues (e.g.
[zero-filling reused per-CPU slots][zero-filling]) but did not eliminate reuse
races. A 2023 patch by Alexei Starovoitov proposed a fallback scheme to
[delay reuse via RCU grace periods in certain conditions][reuse-delay] rather
than always reusing immediately. However, this approach is not universally
applied, and immediate reuse is still considered a “known quirk” in many
cases.

[lkml-2018]: https://lore.kernel.org/lkml/CAG48ez1-WZH55+Wa2vgwZY_hpZJfnDxMzxGLtuN1hG1z6hKf5Q@mail.gmail.com/T/
[htab-atomic-overwrite]: https://lore.kernel.org/bpf/20250204082848.13471-2-hotforest@gmail.com/T/
[kernel-doc-map-hash]: https://www.kernel.org/doc/html/v6.10/bpf/map_hash.html
[zero-filling]: https://lore.kernel.org/all/20201104112332.15191-1-david.verbeiren@tessares.net/
[reuse-delay]: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com/
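
Given the caveats above, a hedged usage pattern on the eBPF side is to copy
the value out of the map immediately instead of holding the returned reference
across other map operations; a minimal sketch (the map and helper below are
hypothetical):

```rust
use aya_ebpf::{macros::map, maps::HashMap};

#[map]
static SETTINGS: HashMap<u32, u64> = HashMap::with_max_entries(64, 0);

#[inline]
fn lookup_setting(key: u32) -> Option<u64> {
    // SAFETY: the value is copied out immediately and the reference returned
    // by `get` is never held across inserts or removes, which narrows (but
    // does not eliminate) the window in which a recycled element could be
    // observed.
    unsafe { SETTINGS.get(key) }.copied()
}
```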

@ -377,7 +377,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -408,7 +408,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -1202,7 +1202,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -1262,7 +1262,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
