aya-ebpf: Reduce repetition and improve documentation of `maps::hash_map`

* The methods of all structs are almost identical, so use a macro to reduce
  code repetition.
* Use the third person in all docstrings.
* Make use of `#[doc]` and split out the most repetitive chunks into
  separate files.
* Make the `Safety` comment for `get*` operations clearer; provide
  context and links.

@@ -0,0 +1,9 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->

A hash map that can be shared between eBPF programs and user space.

# Minimum kernel version

The minimum kernel version required to use this feature is 3.19.
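For context, the user-space half of this sharing is handled by the `aya` crate. The following is a hedged sketch, not part of this change: it assumes a loaded `Ebpf` object whose eBPF side defines the `COUNTER` map used in the examples below.

```rust
use aya::{
    Ebpf,
    maps::{HashMap, MapError},
};

/// Prints every (pid, count) pair the eBPF program has recorded.
fn read_counts(ebpf: &Ebpf) -> Result<(), MapError> {
    // Borrow the map by the name it was declared with on the eBPF side.
    let counts: HashMap<_, u32, u32> =
        HashMap::try_from(ebpf.map("COUNTER").expect("COUNTER map not found"))?;
    // Iterate over all (pid, count) pairs currently in the map.
    for entry in counts.iter() {
        let (pid, count) = entry?;
        println!("pid {pid} issued {count} syscalls");
    }
    Ok(())
}
```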

@@ -0,0 +1,43 @@
# Examples

```rust,no_run
use aya_ebpf::{
    EbpfContext as _,
    macros::{map, tracepoint},
    maps::HashMap,
    programs::TracePointContext,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: HashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = HashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers an error.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```
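The `0` passed as the insert flags above corresponds to `BPF_ANY`. For reference, a sketch of the kernel's update flags; the values come from `<linux/bpf.h>` and are written as literals here, so prefer any constants exposed by the crate's generated bindings where available.

```rust
// Map-update flags accepted by bpf_map_update_elem() (from <linux/bpf.h>).
const BPF_ANY: u64 = 0; // Create a new entry or update an existing one.
const BPF_NOEXIST: u64 = 1; // Create only; fail if the key already exists.
const BPF_EXIST: u64 = 2; // Update only; fail if the key does not exist.

// With BPF_NOEXIST the insert above would fail instead of overwriting:
// COUNTER.insert(pid, 1, BPF_NOEXIST)
```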

@@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->

An LRU hash map that can be shared between eBPF programs and user space.
When the map is full, the least recently used element is evicted.

# Minimum kernel version

The minimum kernel version required to use this feature is 4.10.

@@ -0,0 +1,45 @@
# Examples

```rust,no_run
use aya_ebpf::{
    EbpfContext as _,
    macros::{map, tracepoint},
    maps::LruHashMap,
    programs::TracePointContext,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = LruHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers eviction of
    // the least recently used elements.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```

@@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->

Similar to [`LruHashMap`] but each CPU holds a separate value for a given
key. Typically used to minimize lock contention in eBPF programs.

# Minimum kernel version

The minimum kernel version required to use this feature is 4.10.

@@ -0,0 +1,44 @@
# Examples

```rust,no_run
use aya_ebpf::{
    EbpfContext as _,
    macros::{map, tracepoint},
    maps::LruPerCpuHashMap,
    programs::TracePointContext,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruPerCpuHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = LruPerCpuHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers eviction of
    // the least recently used elements.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```

@@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->

Similar to [`HashMap`] but each CPU holds a separate value for a given key.
Typically used to minimize lock contention in eBPF programs.

# Minimum kernel version

The minimum kernel version required to use this feature is 4.6.
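On the user-space side, a lookup in a per-CPU map returns one value per possible CPU. A hedged sketch with the `aya` user-space crate, assuming a loaded `Ebpf` object exposing the `COUNTER` map from the adjacent example; summing the per-CPU slots yields the process-wide total.

```rust
use aya::{
    Ebpf,
    maps::{MapError, PerCpuHashMap},
};

/// Returns the total syscall count for `pid`, summed across all CPUs.
fn total_for_pid(ebpf: &Ebpf, pid: u32) -> Result<u32, MapError> {
    let counts: PerCpuHashMap<_, u32, u32> =
        PerCpuHashMap::try_from(ebpf.map("COUNTER").expect("COUNTER map not found"))?;
    // `get` returns a PerCpuValues<u32>: one slot per possible CPU.
    let per_cpu = counts.get(&pid, 0)?;
    Ok(per_cpu.iter().sum())
}
```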

@@ -0,0 +1,43 @@
# Examples

```rust,no_run
use aya_ebpf::{
    EbpfContext as _,
    macros::{map, tracepoint},
    maps::PerCpuHashMap,
    programs::TracePointContext,
};

/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: PerCpuHashMap<
    // PID.
    u32,
    // Count of syscalls issued by the given process.
    u32,
> = PerCpuHashMap::with_max_entries(
    // Maximum number of elements. Reaching this capacity triggers an error.
    10,
    // Optional flags.
    0,
);

/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
    let pid = ctx.pid();
    if let Some(count) = COUNTER.get_ptr_mut(pid) {
        unsafe { *count += 1 };
    } else {
        let _ = COUNTER.insert(
            pid,
            // New value.
            1,
            // Optional flags.
            0,
        );
    }
}
```

@@ -12,329 +12,118 @@ use crate::{
     remove,
 };
 
-#[repr(transparent)]
-pub struct HashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
-
-impl<K, V> HashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct LruHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
-
-impl<K, V> LruHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct PerCpuHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
-
-impl<K, V> PerCpuHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
-
-#[repr(transparent)]
-pub struct LruPerCpuHashMap<K, V> {
-    def: UnsafeCell<bpf_map_def>,
-    _k: PhantomData<K>,
-    _v: PhantomData<V>,
-}
-
-unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
-
-impl<K, V> LruPerCpuHashMap<K, V> {
-    pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::None,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    pub const fn pinned(max_entries: u32, flags: u32) -> Self {
-        Self {
-            def: UnsafeCell::new(build_def::<K, V>(
-                BPF_MAP_TYPE_LRU_PERCPU_HASH,
-                max_entries,
-                flags,
-                PinningType::ByName,
-            )),
-            _k: PhantomData,
-            _v: PhantomData,
-        }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    ///
-    /// # Safety
-    ///
-    /// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
-    /// of `insert` or `remove`, and any element removed from the map might get aliased by another
-    /// element in the map, causing garbage to be read, or corruption in case of writes.
-    #[inline]
-    pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
-        unsafe { get(self.def.get(), key.borrow()) }
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
-    /// to decide whether it's safe to dereference the pointer or not.
-    #[inline]
-    pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
-        get_ptr(self.def.get(), key.borrow())
-    }
-
-    /// Retrieve the value associate with `key` from the map.
-    /// The same caveat as `get` applies, and additionally cares should be taken to avoid
-    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
-    /// pointer or not.
-    #[inline]
-    pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
-        get_ptr_mut(self.def.get(), key.borrow())
-    }
-
-    #[inline]
-    pub fn insert(
-        &self,
-        key: impl Borrow<K>,
-        value: impl Borrow<V>,
-        flags: u64,
-    ) -> Result<(), c_long> {
-        insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
-    }
-
-    #[inline]
-    pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
-        remove(self.def.get().cast(), key.borrow())
-    }
-}
+/// Generates a hash map definition with common methods.
+macro_rules! hash_map {
+    (
+        $map_doc:literal,
+        $map_doc_examples:literal,
+        $name:ident,
+        $t:ident
+        $(,)?
+    ) => {
+        #[doc = include_str!($map_doc)]
+        #[doc = include_str!($map_doc_examples)]
+        #[repr(transparent)]
+        pub struct $name<K, V> {
+            def: UnsafeCell<bpf_map_def>,
+            _k: PhantomData<K>,
+            _v: PhantomData<V>,
+        }
+
+        unsafe impl<K: Sync, V: Sync> Sync for $name<K, V> {}
+
+        impl<K, V> $name<K, V> {
+            pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
+                Self {
+                    def: UnsafeCell::new(build_def::<K, V>(
+                        $t,
+                        max_entries,
+                        flags,
+                        PinningType::None,
+                    )),
+                    _k: PhantomData,
+                    _v: PhantomData,
+                }
+            }
+
+            pub const fn pinned(max_entries: u32, flags: u32) -> Self {
+                Self {
+                    def: UnsafeCell::new(build_def::<K, V>(
+                        $t,
+                        max_entries,
+                        flags,
+                        PinningType::ByName,
+                    )),
+                    _k: PhantomData,
+                    _v: PhantomData,
+                }
+            }
+
+            #[doc = "Retrieves the value associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
+                unsafe { get(self.def.get(), key.borrow()) }
+            }
+
+            #[doc = "Retrieves the pointer associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
+                get_ptr(self.def.get(), key.borrow())
+            }
+
+            #[doc = "Retrieves the mutable pointer associated with `key` from the map."]
+            #[doc = include_str!("map_safety.md")]
+            #[inline]
+            pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
+                get_ptr_mut(self.def.get(), key.borrow())
+            }
+
+            /// Inserts a key-value pair into the map.
+            #[inline]
+            pub fn insert(
+                &self,
+                key: impl Borrow<K>,
+                value: impl Borrow<V>,
+                flags: u64,
+            ) -> Result<(), c_long> {
+                insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
+            }
+
+            /// Removes a key from the map.
+            #[inline]
+            pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
+                remove(self.def.get().cast(), key.borrow())
+            }
+        }
+    };
+}
+
+hash_map!(
+    "docs/hash_map.md",
+    "docs/hash_map_examples.md",
+    HashMap,
+    BPF_MAP_TYPE_HASH,
+);
+hash_map!(
+    "docs/lru_hash_map.md",
+    "docs/lru_hash_map_examples.md",
+    LruHashMap,
+    BPF_MAP_TYPE_LRU_HASH,
+);
+hash_map!(
+    "docs/per_cpu_hash_map.md",
+    "docs/per_cpu_hash_map_examples.md",
+    PerCpuHashMap,
+    BPF_MAP_TYPE_PERCPU_HASH,
+);
+hash_map!(
+    "docs/lru_per_cpu_hash_map.md",
+    "docs/lru_per_cpu_hash_map_examples.md",
+    LruPerCpuHashMap,
+    BPF_MAP_TYPE_LRU_PERCPU_HASH,
+);
 
 const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
     bpf_map_def {
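A note on the `#[doc = include_str!(...)]` technique the macro relies on: `include_str!` resolves its path relative to the file containing the invocation, so the Markdown files are expected to sit next to `hash_map.rs` (e.g. under `src/maps/docs/`). A minimal sketch with a hypothetical file name:

```rust
// The attribute splices the file's contents in as the item's rustdoc.
// "docs/example.md" is a hypothetical path relative to this source file.
#[doc = include_str!("docs/example.md")]
pub struct Documented;
```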

@@ -0,0 +1,32 @@
# Safety

The pointer returned by a BPF map lookup is only stable until a concurrent
update or delete. In the kernel's default *preallocated* mode (no
`BPF_F_NO_PREALLOC`), deleted elements are immediately recycled onto a
per-CPU freelist and may be reused by another update before an RCU grace
period elapses. Readers can therefore observe aliasing (values changing
underneath them) or, in rare cases, false-positive lookups when an old and
new key overlap. This behavior was reported on [LKML in 2018][lkml-2018].

Using `BPF_F_NO_PREALLOC` historically forced RCU-delayed freeing, but since
the switch to `bpf_mem_alloc`, both prealloc and no-prealloc modes may
recycle elements quickly; the main distinction now is
[memory vs. allocation overhead][htab-atomic-overwrite].

The [official kernel docs][kernel-doc-map-hash] describe `BPF_F_NO_PREALLOC`
as a *memory-usage knob*, not a safety guarantee.

Patches in 2020 mitigated some issues (e.g.
[zero-filling reused per-CPU slots][zero-filling]) but did not eliminate
reuse races.

A 2023 patch by Alexei Starovoitov proposed a fallback scheme to
[delay reuse via RCU grace periods in certain conditions][reuse-delay]
(rather than always reusing immediately). However, this approach is not
universally applied, and immediate reuse is still considered a “known
quirk” in many cases.

[lkml-2018]: https://lore.kernel.org/lkml/CAG48ez1-WZH55+Wa2vgwZY_hpZJfnDxMzxGLtuN1hG1z6hKf5Q@mail.gmail.com/T/
[htab-atomic-overwrite]: https://lore.kernel.org/bpf/20250204082848.13471-2-hotforest@gmail.com/T/
[kernel-doc-map-hash]: https://www.kernel.org/doc/html/v6.10/bpf/map_hash.html
[zero-filling]: https://lore.kernel.org/all/20201104112332.15191-1-david.verbeiren@tessares.net/
[reuse-delay]: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com/
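Given the reuse races described above, a common defensive pattern on the eBPF side is to copy the value out of the map immediately instead of holding the reference across further map operations. A hedged sketch: the `BPF_F_NO_PREALLOC` constant is written out as a literal (value `1` in the Linux UAPI), and the map mirrors the `COUNTER` example from the other doc files.

```rust
use aya_ebpf::{macros::map, maps::HashMap};

// From <linux/bpf.h>; per the notes above this is a memory-usage knob,
// not a safety guarantee.
const BPF_F_NO_PREALLOC: u32 = 1;

#[map]
static COUNTER: HashMap<u32, u32> = HashMap::with_max_entries(10, BPF_F_NO_PREALLOC);

/// Copies the current count for `pid` out of the map.
fn current_count(pid: u32) -> Option<u32> {
    // SAFETY: the returned reference is dereferenced and copied right away,
    // which narrows (but does not eliminate) the reuse window described
    // above; no reference into the map outlives this call.
    unsafe { COUNTER.get(&pid) }.copied()
}
```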

@@ -377,7 +377,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
+impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -408,7 +408,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
+impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -1202,7 +1202,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
+impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@@ -1262,7 +1262,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
 pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
 pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
-impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
+impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
 impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
 impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
