pull/1367/merge
Michal R 2 weeks ago committed by GitHub
commit 771fa62507
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

@ -0,0 +1,42 @@
# Examples
```rust,no_run
use aya_ebpf::{
btf_maps::HashMap,
macros::{btf_map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[btf_map]
static COUNTER: HashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32,
// Maximum number of elements. Reaching this capacity triggers an error.
10,
// Optional flags.
0
> = HashMap::new();
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,43 @@
# Examples
```rust,no_run
use aya_ebpf::{
btf_maps::LruHashMap,
macros::{btf_map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[btf_map]
static COUNTER: LruHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32,
// Maximum number of elements. Reaching this capacity triggers eviction of
// the least used elements.
10,
// Optional flags.
0
> = LruHashMap::new();
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,43 @@
# Examples
```rust,no_run
use aya_ebpf::{
btf_maps::LruPerCpuHashMap,
macros::{btf_map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[btf_map]
static COUNTER: LruPerCpuHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32,
// Maximum number of elements. Reaching this capacity triggers eviction of
// the least used elements.
10,
// Optional flags.
0
> = LruPerCpuHashMap::new();
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,42 @@
# Examples
```rust,no_run
use aya_ebpf::{
btf_maps::PerCpuHashMap,
macros::{btf_map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[btf_map]
static COUNTER: PerCpuHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32,
// Maximum number of elements. Reaching this capacity triggers an error.
10,
// Optional flags.
0
> = PerCpuHashMap::new();
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,123 @@
use core::{borrow::Borrow, cell::UnsafeCell};
use crate::{
bindings::bpf_map_type::{
BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
BPF_MAP_TYPE_PERCPU_HASH,
},
btf_map_def,
cty::{c_long, c_void},
insert, lookup, remove,
};
/// Generates a hash map definition with common methods.
/// Generates a BTF hash-map wrapper type together with its common methods.
///
/// Macro parameters:
/// * `$map_doc`, `$map_doc_examples`: paths of Markdown files included as
///   the generated type's documentation.
/// * `$name`: name of the generated public wrapper type.
/// * `$def`: the underlying BTF map definition type (see `btf_map_def!`).
macro_rules! hash_map {
    (
        $map_doc:literal,
        $map_doc_examples:literal,
        $name:ident,
        $def:ident
        $(,)?
    ) => {
        #[doc = include_str!($map_doc)]
        #[doc = include_str!($map_doc_examples)]
        // Transparent wrapper: the generated type has exactly the layout of
        // the inner map definition.
        #[repr(transparent)]
        pub struct $name<K, V, const M: usize, const F: usize = 0>(UnsafeCell<$def<K, V, M, F>>);

        // NOTE(review): soundness of this impl relies on all access going
        // through the BPF map helper calls below (the `UnsafeCell` is never
        // dereferenced directly here) — confirm against the helpers' docs.
        unsafe impl<K: Sync, V: Sync, const M: usize, const F: usize> Sync for $name<K, V, M, F> {}

        impl<K, V, const M: usize, const F: usize> $name<K, V, M, F> {
            /// Creates an empty map definition with capacity `M` and flags `F`.
            #[expect(
                clippy::new_without_default,
                reason = "BPF maps are always used as static variables, therefore this method has to be `const`. `Default::default` is not `const`."
            )]
            pub const fn new() -> Self {
                Self(UnsafeCell::new($def::new()))
            }

            #[doc = "Retrieves the value associated with `key` from the map."]
            #[doc = include_str!("../maps/map_safety.md")]
            #[inline]
            pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
                unsafe { get(self.0.get().cast(), key.borrow()) }
            }

            #[doc = "Retrieves the pointer associated with `key` from the map."]
            #[doc = include_str!("../maps/map_safety.md")]
            #[inline]
            pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
                get_ptr(self.0.get().cast(), key.borrow())
            }

            #[doc = "Retrieves the mutable pointer associated with `key` from the map."]
            #[doc = include_str!("../maps/map_safety.md")]
            #[inline]
            pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
                get_ptr_mut(self.0.get().cast(), key.borrow())
            }

            /// Inserts a key-value pair into the map, using the given BPF
            /// update `flags`.
            #[inline]
            pub fn insert(
                &self,
                key: impl Borrow<K>,
                value: impl Borrow<V>,
                flags: u64,
            ) -> Result<(), c_long> {
                insert(self.0.get().cast(), key.borrow(), value.borrow(), flags)
            }

            /// Removes the entry associated with `key` from the map.
            #[inline]
            pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
                remove(self.0.get().cast(), key.borrow())
            }
        }
    };
}
// Underlying BTF map definitions, one per BPF hash-map type.
btf_map_def!(HashMapDef, BPF_MAP_TYPE_HASH);
btf_map_def!(LruHashMapDef, BPF_MAP_TYPE_LRU_HASH);
btf_map_def!(PerCpuHashMapDef, BPF_MAP_TYPE_PERCPU_HASH);
btf_map_def!(LruPerCpuHashMapDef, BPF_MAP_TYPE_LRU_PERCPU_HASH);

// Public wrapper types, each pairing a map definition with the Markdown
// files that document it.
hash_map!(
    "../maps/docs/hash_map.md",
    "docs/hash_map_examples.md",
    HashMap,
    HashMapDef,
);
hash_map!(
    "../maps/docs/lru_hash_map.md",
    "docs/lru_hash_map_examples.md",
    LruHashMap,
    LruHashMapDef,
);
hash_map!(
    "../maps/docs/per_cpu_hash_map.md",
    "docs/per_cpu_hash_map_examples.md",
    PerCpuHashMap,
    PerCpuHashMapDef,
);
hash_map!(
    "../maps/docs/lru_per_cpu_hash_map.md",
    "docs/lru_per_cpu_hash_map_examples.md",
    LruPerCpuHashMap,
    LruPerCpuHashMapDef,
);
/// Looks up `key` and, when present, converts the raw value pointer into a
/// shared reference with a caller-chosen lifetime `'a`.
#[inline]
unsafe fn get<'a, K, V>(def: *mut c_void, key: &K) -> Option<&'a V> {
    let ptr = get_ptr::<K, V>(def, key)?;
    // The caller upholds the map-safety contract (see map_safety.md), so
    // dereferencing the pointer returned by the lookup is on them.
    Some(unsafe { &*ptr })
}
/// Looks up `key` and returns a mutable raw pointer to the stored value,
/// or `None` if the key is absent.
#[inline]
fn get_ptr_mut<K, V>(def: *mut c_void, key: &K) -> Option<*mut V> {
    match lookup(def, key) {
        Some(entry) => Some(entry.as_ptr()),
        None => None,
    }
}
/// Looks up `key` and returns a read-only raw pointer to the stored value,
/// or `None` if the key is absent.
#[inline]
fn get_ptr<K, V>(def: *mut c_void, key: &K) -> Option<*const V> {
    // `def` is already a `*mut c_void`; the previous `def.cast()` was a
    // no-op cast and inconsistent with `get_ptr_mut`, so pass it directly.
    lookup::<_, V>(def, key).map(|p| p.as_ptr().cast_const())
}

@ -1,9 +1,11 @@
use core::marker::PhantomData;
pub mod array;
pub mod hash_map;
pub mod sk_storage;
pub use array::Array;
pub use hash_map::{HashMap, LruHashMap, LruPerCpuHashMap, PerCpuHashMap};
pub use sk_storage::SkStorage;
/// A marker used to remove names of annotated types in LLVM debug info and

@ -0,0 +1,9 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
A hash map that can be shared between eBPF programs and user space.
# Minimum kernel version
The minimum kernel version required to use this feature is 3.19.

@ -0,0 +1,43 @@
# Examples
```rust,no_run
use aya_ebpf::{
macros::{map, tracepoint},
maps::HashMap,
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: HashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32
> = HashMap::with_max_entries(
// Maximum number of elements. Reaching this capacity triggers an error.
10,
// Optional flags.
0
);
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
An LRU hash map that can be shared between eBPF programs and user space.
When it reaches the capacity `M`, the least used element is evicted.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.10.

@ -0,0 +1,45 @@
# Examples
```rust,no_run
use aya_ebpf::{
macros::{map, tracepoint},
maps::LruHashMap,
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32,
> = LruHashMap::with_max_entries(
// Maximum number of elements. Reaching this capacity triggers eviction of
// the least used elements.
10,
// Optional flags.
0
);
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
Similar to [`LruHashMap`] but each CPU holds a separate value for a given
key. Typically used to minimize lock contention in eBPF programs.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.10.

@ -0,0 +1,44 @@
# Examples
```rust,no_run
use aya_ebpf::{
maps::LruPerCpuHashMap,
macros::{map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: LruPerCpuHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32
> = LruPerCpuHashMap::with_max_entries(
// Maximum number of elements. Reaching this capacity triggers eviction of
// the least used elements.
10,
// Optional flags.
0
);
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -0,0 +1,10 @@
<!-- This is a Rust docstring which should not start with a top-level heading.
-->
<!-- markdownlint-disable MD041 -->
Similar to [`HashMap`] but each CPU holds a separate value for a given key.
Typically used to minimize lock contention in eBPF programs.
# Minimum kernel version
The minimum kernel version required to use this feature is 4.6.

@ -0,0 +1,43 @@
# Examples
```rust,no_run
use aya_ebpf::{
maps::PerCpuHashMap,
macros::{map, tracepoint},
programs::TracePointContext,
EbpfContext as _,
};
/// A hash map that counts syscalls issued by different processes.
#[map]
static COUNTER: PerCpuHashMap<
// PID.
u32,
// Count of syscalls issued by the given process.
u32
> = PerCpuHashMap::with_max_entries(
// Maximum number of elements. Reaching this capacity triggers an error.
10,
// Optional flags.
0
);
/// A simple program attached to the `sys_enter` tracepoint that counts
/// syscalls.
#[tracepoint]
fn sys_enter(ctx: TracePointContext) {
let pid = ctx.pid();
if let Some(count) = COUNTER.get_ptr_mut(pid) {
unsafe { *count += 1 };
} else {
COUNTER.insert(
pid,
// New value.
1,
// Optional flags.
0
);
}
}
```

@ -12,329 +12,118 @@ use crate::{
remove,
};
#[repr(transparent)]
pub struct HashMap<K, V> {
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
impl<K, V> HashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associate with `key` from the map.
///
/// # Safety
///
/// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
/// of `insert` or `remove`, and any element removed from the map might get aliased by another
/// element in the map, causing garbage to be read, or corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
unsafe { get(self.def.get(), key.borrow()) }
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
get_ptr(self.def.get(), key.borrow())
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, and additionally cares should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key.borrow())
}
#[inline]
pub fn insert(
&self,
key: impl Borrow<K>,
value: impl Borrow<V>,
flags: u64,
) -> Result<(), c_long> {
insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
}
#[inline]
pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
remove(self.def.get().cast(), key.borrow())
}
}
#[repr(transparent)]
pub struct LruHashMap<K, V> {
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
impl<K, V> LruHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associate with `key` from the map.
///
/// # Safety
///
/// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
/// of `insert` or `remove`, and any element removed from the map might get aliased by another
/// element in the map, causing garbage to be read, or corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
unsafe { get(self.def.get(), key.borrow()) }
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
get_ptr(self.def.get(), key.borrow())
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, and additionally cares should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key.borrow())
}
#[inline]
pub fn insert(
&self,
key: impl Borrow<K>,
value: impl Borrow<V>,
flags: u64,
) -> Result<(), c_long> {
insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
}
#[inline]
pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
remove(self.def.get().cast(), key.borrow())
}
}
#[repr(transparent)]
pub struct PerCpuHashMap<K, V> {
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
impl<K, V> PerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
/// Generates a hash map definition with common methods.
macro_rules! hash_map {
(
$map_doc:literal,
$map_doc_examples:literal,
$name:ident,
$t:ident
$(,)?
) => {
#[doc = include_str!($map_doc)]
#[doc = include_str!($map_doc_examples)]
#[repr(transparent)]
pub struct $name<K, V> {
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
unsafe impl<K: Sync, V: Sync> Sync for $name<K, V> {}
impl<K, V> $name<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
$t,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
}
}
#[doc = "Retrieves the value associated with `key` from the map."]
#[doc = include_str!("map_safety.md")]
#[inline]
pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
unsafe { get(self.def.get(), key.borrow()) }
}
#[doc = "Retrieves the pointer associated with `key` from the map."]
#[doc = include_str!("map_safety.md")]
#[inline]
pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
get_ptr(self.def.get(), key.borrow())
}
#[doc = "Retrieves the mutable pointer associated with `key` from the map."]
#[doc = include_str!("map_safety.md")]
#[inline]
pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key.borrow())
}
/// Inserts a key-value pair into the map.
#[inline]
pub fn insert(
&self,
key: impl Borrow<K>,
value: impl Borrow<V>,
flags: u64,
) -> Result<(), c_long> {
insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
}
/// Removes a key from the map.
#[inline]
pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
remove(self.def.get().cast(), key.borrow())
}
}
}
/// Retrieve the value associate with `key` from the map.
///
/// # Safety
///
/// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
/// of `insert` or `remove`, and any element removed from the map might get aliased by another
/// element in the map, causing garbage to be read, or corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
unsafe { get(self.def.get(), key.borrow()) }
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
get_ptr(self.def.get(), key.borrow())
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, and additionally cares should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key.borrow())
}
#[inline]
pub fn insert(
&self,
key: impl Borrow<K>,
value: impl Borrow<V>,
flags: u64,
) -> Result<(), c_long> {
insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
}
#[inline]
pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
remove(self.def.get().cast(), key.borrow())
}
};
}
#[repr(transparent)]
pub struct LruPerCpuHashMap<K, V> {
def: UnsafeCell<bpf_map_def>,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
impl<K, V> LruPerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
flags,
PinningType::None,
)),
_k: PhantomData,
_v: PhantomData,
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
flags,
PinningType::ByName,
)),
_k: PhantomData,
_v: PhantomData,
}
}
/// Retrieve the value associate with `key` from the map.
///
/// # Safety
///
/// Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel does not guarantee the atomicity
/// of `insert` or `remove`, and any element removed from the map might get aliased by another
/// element in the map, causing garbage to be read, or corruption in case of writes.
#[inline]
pub unsafe fn get(&self, key: impl Borrow<K>) -> Option<&V> {
unsafe { get(self.def.get(), key.borrow()) }
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
/// to decide whether it's safe to dereference the pointer or not.
#[inline]
pub fn get_ptr(&self, key: impl Borrow<K>) -> Option<*const V> {
get_ptr(self.def.get(), key.borrow())
}
/// Retrieve the value associate with `key` from the map.
/// The same caveat as `get` applies, and additionally cares should be taken to avoid
/// concurrent writes, but it's up to the caller to decide whether it's safe to dereference the
/// pointer or not.
#[inline]
pub fn get_ptr_mut(&self, key: impl Borrow<K>) -> Option<*mut V> {
get_ptr_mut(self.def.get(), key.borrow())
}
#[inline]
pub fn insert(
&self,
key: impl Borrow<K>,
value: impl Borrow<V>,
flags: u64,
) -> Result<(), c_long> {
insert(self.def.get().cast(), key.borrow(), value.borrow(), flags)
}
#[inline]
pub fn remove(&self, key: impl Borrow<K>) -> Result<(), c_long> {
remove(self.def.get().cast(), key.borrow())
}
}
// Generate the legacy (bpf_map_def-based) wrapper types; each invocation
// pairs a BPF map-type constant with its documentation files. Trailing
// commas are used consistently across all four invocations (the macro
// accepts them via `$(,)?`).
hash_map!(
    "docs/hash_map.md",
    "docs/hash_map_examples.md",
    HashMap,
    BPF_MAP_TYPE_HASH,
);
hash_map!(
    "docs/lru_hash_map.md",
    "docs/lru_hash_map_examples.md",
    LruHashMap,
    BPF_MAP_TYPE_LRU_HASH,
);
hash_map!(
    "docs/per_cpu_hash_map.md",
    "docs/per_cpu_hash_map_examples.md",
    PerCpuHashMap,
    BPF_MAP_TYPE_PERCPU_HASH,
);
hash_map!(
    "docs/lru_per_cpu_hash_map.md",
    "docs/lru_per_cpu_hash_map_examples.md",
    LruPerCpuHashMap,
    BPF_MAP_TYPE_LRU_PERCPU_HASH,
);
const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType) -> bpf_map_def {
bpf_map_def {

@ -0,0 +1,32 @@
# Safety
The pointer returned by a BPF map lookup is only stable until a concurrent
update or delete. In the kernel's default *preallocated* mode (no
`BPF_F_NO_PREALLOC`), deleted elements are immediately recycled onto a
per-CPU freelist and may be reused by another update before an RCU grace
period elapses. Readers can therefore observe aliasing (values changing
underneath them) or, in rare cases, false-positive lookups when an old and
new key overlap. This behavior was reported on [LKML in 2018][lkml-2018].
Using `BPF_F_NO_PREALLOC` historically forced RCU-delayed freeing, but since
the switch to `bpf_mem_alloc`, both prealloc and no-prealloc modes may
recycle elements quickly; the main distinction now is
[memory vs. allocation overhead][htab-atomic-overwrite].
The [official kernel docs][kernel-doc-map-hash] describe `BPF_F_NO_PREALLOC`
as a *memory-usage knob*, not a safety guarantee.
Patches in 2020 mitigated some issues (e.g.
[zero-filling reused per-CPU slots][zero-filling]) but did not eliminate reuse
races.
A 2023 patch by Alexei proposed a fallback scheme to
[delay reuse via RCU grace periods in certain conditions][reuse-delay] (rather
than always reusing immediately). However, this approach is not universally
applied, and immediate reuse is still considered a “known quirk” in many cases.
[lkml-2018]: https://lore.kernel.org/lkml/CAG48ez1-WZH55+Wa2vgwZY_hpZJfnDxMzxGLtuN1hG1z6hKf5Q@mail.gmail.com/T/
[htab-atomic-overwrite]: https://lore.kernel.org/bpf/20250204082848.13471-2-hotforest@gmail.com/T/
[kernel-doc-map-hash]: https://www.kernel.org/doc/html/v6.10/bpf/map_hash.html
[zero-filling]: https://lore.kernel.org/all/20201104112332.15191-1-david.verbeiren@tessares.net/
[reuse-delay]: https://lore.kernel.org/bpf/20230706033447.54696-13-alexei.starovoitov@gmail.com/

@ -20,6 +20,10 @@ pub mod bpf_probe_read {
unsafe impl aya::Pod for TestResult {}
}
/// Constants shared between the hash-map integration test's eBPF and
/// user-space sides.
pub mod hash_map {
    // Index in the result array where the `get` test programs store the
    // value they read from the hash map.
    pub const GET_INDEX: u32 = 0;
}
pub mod log {
pub const BUF_LEN: usize = 1024;

@ -32,6 +32,10 @@ path = "src/array.rs"
name = "bpf_probe_read"
path = "src/bpf_probe_read.rs"
[[bin]]
name = "hash_map"
path = "src/hash_map.rs"
[[bin]]
name = "linear_data_structures"
path = "src/linear_data_structures.rs"

@ -0,0 +1,135 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
#[cfg(not(test))]
extern crate ebpf_panic;
use aya_ebpf::{
btf_maps::{Array, HashMap, LruHashMap, LruPerCpuHashMap, PerCpuHashMap},
cty::c_long,
macros::{btf_map, map, uprobe},
maps::{
Array as LegacyArray, HashMap as LegacyHashMap, LruHashMap as LegacyLruHashMap,
LruPerCpuHashMap as LegacyLruPerCpuHashMap, PerCpuHashMap as LegacyPerCpuHashMap,
},
programs::ProbeContext,
};
use integration_common::hash_map::GET_INDEX;
// BTF map definitions under test. `RESULT` is where the `get` programs
// report the values they read back.
#[btf_map]
static RESULT: Array<u32, 3 /* max_elements */, 0> = Array::new();
#[btf_map]
static HASH_MAP: HashMap<u32, u32, 10 /* max_elements */, 0> = HashMap::new();
#[btf_map]
static LRU_HASH_MAP: LruHashMap<u32, u32, 10 /* max_elements */, 0> = LruHashMap::new();
#[btf_map]
static PER_CPU_HASH_MAP: PerCpuHashMap<u32, u32, 10 /* max_elements */, 0> = PerCpuHashMap::new();
#[btf_map]
static LRU_PER_CPU_HASH_MAP: LruPerCpuHashMap<u32, u32, 10 /* max_elements */, 0> =
    LruPerCpuHashMap::new();

// Legacy (bpf_map_def-based) map definitions mirroring the BTF ones above,
// with the same capacities, so both code paths are exercised identically.
#[map]
static RESULT_LEGACY: LegacyArray<u32> = LegacyArray::with_max_entries(3, 0);
#[map]
static HASH_MAP_LEGACY: LegacyHashMap<u32, u32> = LegacyHashMap::with_max_entries(10, 0);
#[map]
static LRU_HASH_MAP_LEGACY: LegacyLruHashMap<u32, u32> = LegacyLruHashMap::with_max_entries(10, 0);
#[map]
static PER_CPU_HASH_MAP_LEGACY: LegacyPerCpuHashMap<u32, u32> =
    LegacyPerCpuHashMap::with_max_entries(10, 0);
#[map]
static LRU_PER_CPU_HASH_MAP_LEGACY: LegacyLruPerCpuHashMap<u32, u32> =
    LegacyLruPerCpuHashMap::with_max_entries(10, 0);
/// Generates a function named `$result_set_fn` that stores `value` at
/// `index` in the `$result_map` array, so that user space can read back
/// results observed by the eBPF test programs.
macro_rules! define_result_set {
    (
        $result_map:ident,
        $result_set_fn:ident
    ) => {
        #[inline(always)]
        fn $result_set_fn(index: u32, value: u32) -> Result<(), c_long> {
            // `-1` signals any failure: index out of bounds or a null slot.
            let ptr = $result_map.get_ptr_mut(index).ok_or(-1)?;
            // `as_mut` yields `None` for a null pointer, avoiding UB below.
            let dst = unsafe { ptr.as_mut() };
            let dst_res = dst.ok_or(-1)?;
            *dst_res = value;
            Ok(())
        }
    };
}
// Result setters for the BTF result array and for the legacy one.
define_result_set!(RESULT, result_set);
define_result_set!(RESULT_LEGACY, result_set_legacy);
/// Generates a pair of uprobe programs for one hash-map flavor:
///
/// * `$insert_prog` inserts its two probed arguments into `$hash_map` as a
///   key-value pair.
/// * `$get_prog` looks up its probed argument in `$hash_map` and reports the
///   found value through `$result_set_fn`.
macro_rules! define_hash_map_test {
    (
        $hash_map:ident,
        $result_set_fn:ident,
        $insert_prog:ident,
        $get_prog:ident
        $(,)?
    ) => {
        #[uprobe]
        fn $insert_prog(ctx: ProbeContext) -> Result<(), c_long> {
            // Probed function arguments: arg(0) = key, arg(1) = value;
            // `-1` on a missing argument.
            let key = ctx.arg(0).ok_or(-1)?;
            let value = ctx.arg(1).ok_or(-1)?;
            $hash_map.insert(&key, &value, 0)?;
            Ok(())
        }
        #[uprobe]
        fn $get_prog(ctx: ProbeContext) -> Result<(), c_long> {
            let key = ctx.arg(0).ok_or(-1)?;
            // `-1` when the key is absent; see map_safety.md for the
            // caveats on `get`.
            let value = unsafe { $hash_map.get(&key).ok_or(-1)? };
            $result_set_fn(GET_INDEX, *value)?;
            Ok(())
        }
    };
}
// Instantiate insert/get program pairs for every map flavor, in both the
// BTF and the legacy variants.
define_hash_map_test!(HASH_MAP, result_set, hash_map_insert, hash_map_get);
define_hash_map_test!(
    HASH_MAP_LEGACY,
    result_set_legacy,
    hash_map_insert_legacy,
    hash_map_get_legacy,
);
define_hash_map_test!(
    LRU_HASH_MAP,
    result_set,
    lru_hash_map_insert,
    lru_hash_map_get
);
define_hash_map_test!(
    LRU_HASH_MAP_LEGACY,
    result_set_legacy,
    lru_hash_map_insert_legacy,
    lru_hash_map_get_legacy,
);
define_hash_map_test!(
    PER_CPU_HASH_MAP,
    result_set,
    per_cpu_hash_map_insert,
    per_cpu_hash_map_get,
);
define_hash_map_test!(
    PER_CPU_HASH_MAP_LEGACY,
    result_set_legacy,
    per_cpu_hash_map_insert_legacy,
    per_cpu_hash_map_get_legacy,
);
define_hash_map_test!(
    LRU_PER_CPU_HASH_MAP,
    result_set,
    lru_per_cpu_hash_map_insert,
    lru_per_cpu_hash_map_get,
);
define_hash_map_test!(
    LRU_PER_CPU_HASH_MAP_LEGACY,
    result_set_legacy,
    lru_per_cpu_hash_map_insert_legacy,
    lru_per_cpu_hash_map_get_legacy,
);

@ -27,6 +27,7 @@ integration-common = { path = "../integration-common", features = ["user"] }
libc = { workspace = true }
log = { workspace = true }
netns-rs = { workspace = true }
nix = { workspace = true, features = ["process", "sched"] }
object = { workspace = true, features = ["elf", "read_core", "std"] }
procfs = { workspace = true, features = ["flate2"] }
rand = { workspace = true, features = ["thread_rng"] }

@ -40,6 +40,7 @@ bpf_file!(
ARRAY => "array",
BPF_PROBE_READ => "bpf_probe_read",
HASH_MAP => "hash_map",
LINEAR_DATA_STRUCTURES => "linear_data_structures",
LOG => "log",
MAP_TEST => "map_test",

@ -3,6 +3,7 @@ mod bpf_probe_read;
mod btf_relocations;
mod elf;
mod feature_probe;
mod hash_map;
mod info;
mod iter;
mod linear_data_structures;

@ -0,0 +1,234 @@
use std::thread;
use aya::{
Ebpf, EbpfLoader,
maps::{Array, HashMap, MapData, MapError, PerCpuHashMap},
programs::UProbe,
};
use integration_common::hash_map::GET_INDEX;
/// Triggers the eBPF program that inserts the given `key` and `value` pair
/// into the hash map.
///
/// The function itself is a no-op: the uprobe attached to this symbol reads
/// the arguments. `black_box` plus `inline(never)` keep the call (and its
/// arguments) from being optimized away, and `no_mangle` keeps the symbol
/// name attachable.
#[unsafe(no_mangle)]
#[inline(never)]
extern "C" fn hash_map_insert(key: u32, value: u32) {
    std::hint::black_box((key, value));
}
/// Triggers the eBPF program that retrieves the value associated with the
/// `key` and inserts it into the array.
///
/// No-op body: the attached uprobe observes the argument; `black_box` and
/// `inline(never)` prevent the optimizer from eliding the call.
#[unsafe(no_mangle)]
#[inline(never)]
extern "C" fn hash_map_get(key: u32) {
    std::hint::black_box(key);
}
/// Loads the uprobe program `prog_name` and attaches it to the given
/// `symbol`.
///
/// The probe is attached to the current test binary (`/proc/self/exe`), so
/// calling the `symbol` function from this process triggers the program.
/// Panics on any load/attach failure (test helper).
fn load_program(ebpf: &mut Ebpf, prog_name: &str, symbol: &str) {
    let prog: &mut UProbe = ebpf.program_mut(prog_name).unwrap().try_into().unwrap();
    prog.load().unwrap();
    prog.attach(symbol, "/proc/self/exe", None, None).unwrap();
}
/// Loads the pair of programs:
///
/// * `insert_prog` that inserts key and value pairs into the `hash_map`.
/// * `get_prog` that retrieves values from the `hash_map` and inserts them
///   into `result_array`.
///
/// The programs are attached to the local `hash_map_insert` /
/// `hash_map_get` trigger symbols. Returns typed handles to the result
/// array and the hash map (looked up by the given map names).
fn load_programs_with_maps<'a>(
    ebpf: &'a mut Ebpf,
    result_array: &'a str,
    hash_map: &'a str,
    insert_prog: &'a str,
    get_prog: &'a str,
) -> (Array<&'a MapData, u32>, HashMap<&'a MapData, u32, u32>) {
    load_program(ebpf, insert_prog, "hash_map_insert");
    load_program(ebpf, get_prog, "hash_map_get");
    let result_array = ebpf.map(result_array).unwrap();
    let result_array = Array::<_, u32>::try_from(result_array).unwrap();
    let hash_map = ebpf.map(hash_map).unwrap();
    let hash_map = HashMap::<_, u32, u32>::try_from(hash_map).unwrap();
    (result_array, hash_map)
}
/// Loads the `insert_prog` program that inserts elements into the
/// `per_cpu_hash_map`, attaching it to the local `hash_map_insert` trigger
/// symbol. Returns a typed handle to the per-CPU map.
fn load_program_with_per_cpu_map<'a>(
    ebpf: &'a mut Ebpf,
    per_cpu_hash_map: &'a str,
    insert_prog: &'a str,
) -> PerCpuHashMap<&'a MapData, u32, u32> {
    load_program(ebpf, insert_prog, "hash_map_insert");
    let hash_map = ebpf.map(per_cpu_hash_map).unwrap();
    PerCpuHashMap::<_, u32, u32>::try_from(hash_map).unwrap()
}
/// End-to-end test of the plain hash map: values inserted via the eBPF
/// program must be readable both from user space and from a second eBPF
/// program.
#[test_log::test]
fn test_hash_map() {
    let mut ebpf = EbpfLoader::new().load(crate::HASH_MAP).unwrap();
    // Run the same scenario against both map-definition styles.
    for (result_map_name, hash_map_name, insert_prog_name, get_prog_name) in [
        // BTF map definitions.
        ("RESULT", "HASH_MAP", "hash_map_insert", "hash_map_get"),
        // Legacy map definitions.
        (
            "RESULT_LEGACY",
            "HASH_MAP_LEGACY",
            "hash_map_insert_legacy",
            "hash_map_get_legacy",
        ),
    ] {
        let (result_array, hash_map) = load_programs_with_maps(
            &mut ebpf,
            result_map_name,
            hash_map_name,
            insert_prog_name,
            get_prog_name,
        );
        // Insert 9 elements (below the map capacity of 10); `i^2` keys make
        // keys and values distinct.
        let seq = 0_u32..9;
        for i in seq.clone() {
            hash_map_insert(i.pow(2), i);
        }
        for i in seq.clone() {
            // Assert the value returned by user-space API.
            let key = i.pow(2);
            let value = hash_map.get(&key, 0).unwrap();
            assert_eq!(value, i);
            // Assert the value returned by eBPF in-kernel API.
            hash_map_get(key);
            let result = result_array.get(&GET_INDEX, 0).unwrap();
            assert_eq!(result, i);
        }
    }
}
/// End-to-end test of the LRU hash map: inserting over capacity must evict
/// the oldest entries while the most recent ones remain retrievable.
#[test_log::test]
fn test_lru_hash_map() {
    let mut ebpf = EbpfLoader::new().load(crate::HASH_MAP).unwrap();
    for (result_map_name, hash_map_name, insert_prog_name, get_prog_name) in [
        // BTF map definitions.
        (
            "RESULT",
            "LRU_HASH_MAP",
            "lru_hash_map_insert",
            "lru_hash_map_get",
        ),
        // Legacy map definitions.
        (
            "RESULT_LEGACY",
            "LRU_HASH_MAP_LEGACY",
            "lru_hash_map_insert_legacy",
            "lru_hash_map_get_legacy",
        ),
    ] {
        let (result_array, hash_map) = load_programs_with_maps(
            &mut ebpf,
            result_map_name,
            hash_map_name,
            insert_prog_name,
            get_prog_name,
        );
        // Insert elements over capacity.
        let seq = 0_u32..15;
        for i in seq.clone() {
            hash_map_insert(i.pow(2), i);
        }
        // Check whether elements 0..5 got evicted.
        // NOTE(review): this assumes eviction removes exactly the 5 oldest
        // entries; kernel LRU is approximate — confirm this is stable.
        for i in 0_u32..5 {
            let key = i.pow(2);
            assert!(matches!(hash_map.get(&key, 0), Err(MapError::KeyNotFound)));
        }
        // Check whether the newest 10 elements can be retrieved.
        for i in 5_u32..15 {
            // Assert the value returned by user-space API.
            let key = i.pow(2);
            let value = hash_map.get(&key, 0).unwrap();
            assert_eq!(value, i);
            // Assert the value returned by eBPF in-kernel API.
            hash_map_get(key);
            let result = result_array.get(&GET_INDEX, 0).unwrap();
            assert_eq!(result, i);
        }
    }
}
#[test_log::test]
fn test_per_cpu_hash_map() {
    let mut ebpf = EbpfLoader::new().load(crate::HASH_MAP).unwrap();
    // Exercise both the BTF and the legacy map definitions.
    for (hash_map_name, insert_prog_name) in [
        ("PER_CPU_HASH_MAP", "per_cpu_hash_map_insert"),
        ("PER_CPU_HASH_MAP_LEGACY", "per_cpu_hash_map_insert_legacy"),
    ] {
        let hash_map = load_program_with_per_cpu_map(&mut ebpf, hash_map_name, insert_prog_name);
        // Insert from a thread pinned to CPU 0, so that all writes land in
        // that CPU's slot of the per-CPU map.
        thread::scope(|s| {
            s.spawn(|| {
                let mut cpu_set = nix::sched::CpuSet::new();
                cpu_set.set(0).unwrap();
                nix::sched::sched_setaffinity(nix::unistd::Pid::from_raw(0), &cpu_set).unwrap();
                for i in 0_u32..9 {
                    hash_map_insert(i.pow(2), i);
                }
            });
        });
        for i in 0_u32..9 {
            let values = hash_map.get(&i.pow(2), 0).unwrap();
            // The first per-CPU value corresponds to CPU 0, where the
            // inserts were performed.
            assert_eq!(values.first().unwrap(), &i);
        }
    }
}
#[test_log::test]
fn test_lru_per_cpu_hash_map() {
    let mut ebpf = EbpfLoader::new().load(crate::HASH_MAP).unwrap();
    // Exercise both the BTF and the legacy map definitions.
    for (hash_map_name, insert_prog_name) in [
        ("LRU_PER_CPU_HASH_MAP", "lru_per_cpu_hash_map_insert"),
        (
            "LRU_PER_CPU_HASH_MAP_LEGACY",
            "lru_per_cpu_hash_map_insert_legacy",
        ),
    ] {
        let hash_map = load_program_with_per_cpu_map(&mut ebpf, hash_map_name, insert_prog_name);
        // Insert more elements (15) than the map capacity (10), from a
        // thread pinned to CPU 0 so that all writes hit the same per-CPU
        // LRU list.
        thread::scope(|s| {
            s.spawn(|| {
                let mut cpu_set = nix::sched::CpuSet::new();
                cpu_set.set(0).unwrap();
                nix::sched::sched_setaffinity(nix::unistd::Pid::from_raw(0), &cpu_set).unwrap();
                for i in 0_u32..15 {
                    hash_map_insert(i.pow(2), i);
                }
            });
        });
        // The oldest 5 elements should have been evicted.
        for i in 0_u32..5 {
            assert!(matches!(
                hash_map.get(&i.pow(2), 0),
                Err(MapError::KeyNotFound)
            ));
        }
        // The newest 10 elements should still be retrievable; the first
        // per-CPU value corresponds to CPU 0, where the inserts happened.
        for i in 5_u32..15 {
            let values = hash_map.get(&i.pow(2), 0).unwrap();
            assert_eq!(values.first().unwrap(), &i);
        }
    }
}

@ -58,6 +58,227 @@ impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::array::ArrayDef<K, V,
pub fn aya_ebpf::btf_maps::array::ArrayDef<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::array::ArrayDef<K, V, M, F>
pub fn aya_ebpf::btf_maps::array::ArrayDef<K, V, M, F>::from(t: T) -> T
pub mod aya_ebpf::btf_maps::hash_map
#[repr(transparent)] pub struct aya_ebpf::btf_maps::hash_map::HashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::from(t: T) -> T
pub struct aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, const M: usize, const F: usize>
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
pub const fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::new() -> aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Freeze for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Sync for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::HashMapDef<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::from(t: T) -> T
pub struct aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, const M: usize, const F: usize>
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
pub const fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::new() -> aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMapDef<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::from(t: T) -> T
pub struct aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, const M: usize, const F: usize>
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
pub const fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::new() -> aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMapDef<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::from(t: T) -> T
pub struct aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, const M: usize, const F: usize>
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
pub const fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::new() -> aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Freeze for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Sync for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMapDef<K, V, M, F>::from(t: T) -> T
pub mod aya_ebpf::btf_maps::sk_storage
#[repr(transparent)] pub struct aya_ebpf::btf_maps::sk_storage::SkStorage<T>(_)
impl<T> aya_ebpf::btf_maps::sk_storage::SkStorage<T>
@ -141,6 +362,126 @@ impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::array::Array<T, M, F>
pub fn aya_ebpf::btf_maps::array::Array<T, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::array::Array<T, M, F>
pub fn aya_ebpf::btf_maps::array::Array<T, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::HashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::HashMap<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::LruHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruHashMap<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::LruPerCpuHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::LruPerCpuHashMap<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::PerCpuHashMap<K, V, const M: usize, const F: usize>(_)
impl<K, V, const M: usize, const F: usize> aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
pub unsafe fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<&V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get_ptr(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*const V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::get_ptr_mut(&self, key: impl core::borrow::Borrow<K>) -> core::option::Option<*mut V>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::insert(&self, key: impl core::borrow::Borrow<K>, value: impl core::borrow::Borrow<V>, flags: u64) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::new() -> Self
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
impl<K: core::marker::Sync, V: core::marker::Sync, const M: usize, const F: usize> core::marker::Sync for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Freeze for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::marker::Send for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::marker::Unpin for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> !core::panic::unwind_safe::RefUnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
impl<K, V, const M: usize, const F: usize> core::panic::unwind_safe::UnwindSafe for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where K: core::panic::unwind_safe::RefUnwindSafe, V: core::panic::unwind_safe::RefUnwindSafe
impl<T, U> core::convert::Into<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::From<T>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::Into<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::Error = core::convert::Infallible
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where U: core::convert::TryFrom<T>
pub type aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: 'static + ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F> where T: ?core::marker::Sized
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>
pub fn aya_ebpf::btf_maps::hash_map::PerCpuHashMap<K, V, M, F>::from(t: T) -> T
#[repr(transparent)] pub struct aya_ebpf::btf_maps::SkStorage<T>(_)
impl<T> aya_ebpf::btf_maps::sk_storage::SkStorage<T>
pub unsafe fn aya_ebpf::btf_maps::sk_storage::SkStorage<T>::delete(&self, sk: *mut aya_ebpf_bindings::x86_64::bindings::bpf_sock) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
@ -377,7 +718,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -408,7 +749,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -1202,7 +1543,7 @@ pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::insert(&self, key: impl
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::LruPerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin
@ -1262,7 +1603,7 @@ pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::insert(&self, key: impl co
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::pinned(max_entries: u32, flags: u32) -> Self
pub fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::remove(&self, key: impl core::borrow::Borrow<K>) -> core::result::Result<(), aya_ebpf_cty::od::c_long>
pub const fn aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>::with_max_entries(max_entries: u32, flags: u32) -> Self
impl<K, V> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K: core::marker::Sync, V: core::marker::Sync> core::marker::Sync for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> !core::marker::Freeze for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V>
impl<K, V> core::marker::Send for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Send, V: core::marker::Send
impl<K, V> core::marker::Unpin for aya_ebpf::maps::hash_map::PerCpuHashMap<K, V> where K: core::marker::Unpin, V: core::marker::Unpin

Loading…
Cancel
Save