mirror of https://github.com/aya-rs/aya
feat(aya-ebpf): BTF maps
Before this change, Aya supported only legacy BPF map definitions, which are instances of the `bpf_map_def` struct and end up in the `maps` ELF section. This change introduces BTF maps: custom structs that encode the map's metadata in their BTF type information and end up in the `.maps` section. Legacy maps are no longer supported by libbpf, and the kernel does not accept them for newer map types like inode/task storage. Add support for BTF maps in aya-ebpf under the `btf-maps` feature flag. Using this feature requires emitting debug info for the eBPF crate and passing the `--btf` flag to bpf-linker.
parent
5a43bedc01
commit
d102383d31
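For illustration, a minimal end-to-end sketch of what this change enables. The map name, sizes, and program are hypothetical; `btf_map` and `btf_maps` are the attribute and module introduced below, and the crate must be compiled with debug info and linked with `bpf-linker --btf`:

use aya_ebpf::{
    bindings::xdp_action,
    btf_maps::HashMap,
    macros::{btf_map, xdp},
    programs::XdpContext,
};

// The static is re-emitted into the `.maps` ELF section; its BTF type
// information describes the map type, key/value types and max_entries.
#[btf_map]
static COUNTS: HashMap<u32, u64, 1024> = HashMap::new();

#[xdp]
fn count_packets(_ctx: XdpContext) -> u32 {
    let key = 0u32;
    let next = unsafe { COUNTS.get(&key) }.copied().unwrap_or(0) + 1;
    let _ = COUNTS.insert(&key, &next, 0);
    xdp_action::XDP_PASS
}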
@ -0,0 +1,73 @@
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemStatic, Result};

use crate::args::name_arg;

pub(crate) struct BtfMap {
    item: ItemStatic,
    name: String,
}

impl BtfMap {
    pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<BtfMap> {
        let item: ItemStatic = syn::parse2(item)?;
        let mut args = syn::parse2(attrs)?;
        let name = name_arg(&mut args).unwrap_or_else(|| item.ident.to_string());
        Ok(BtfMap { item, name })
    }

    pub(crate) fn expand(&self) -> TokenStream {
        let section_name = ".maps";
        let name = &self.name;
        let item = &self.item;
        quote! {
            #[link_section = #section_name]
            #[export_name = #name]
            #item
        }
    }
}

#[cfg(test)]
mod tests {
    use syn::parse_quote;

    use super::*;

    #[test]
    fn test_map_with_name() {
        let map = BtfMap::parse(
            parse_quote!(name = "foo"),
            parse_quote!(
                static BAR: HashMap<&'static str, u32> = HashMap::new();
            ),
        )
        .unwrap();
        let expanded = map.expand();
        let expected = quote!(
            #[link_section = ".maps"]
            #[export_name = "foo"]
            static BAR: HashMap<&'static str, u32> = HashMap::new();
        );
        assert_eq!(expected.to_string(), expanded.to_string());
    }

    #[test]
    fn test_map_no_name() {
        let map = BtfMap::parse(
            parse_quote!(),
            parse_quote!(
                static BAR: HashMap<&'static str, u32> = HashMap::new();
            ),
        )
        .unwrap();
        let expanded = map.expand();
        let expected = quote!(
            #[link_section = ".maps"]
            #[export_name = "BAR"]
            static BAR: HashMap<&'static str, u32> = HashMap::new();
        );
        assert_eq!(expected.to_string(), expanded.to_string());
    }
}
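To make the transformation concrete, here is a sketch of the expansion for a hypothetical map static; the attribute only re-emits the item with a section and an export name (defaulting to the identifier when no `name = "..."` argument is given):

// Input:
//
//     #[btf_map]
//     static PKT_COUNT: Array<u64, 256> = Array::new();
//
// Output of `BtfMap::expand`:
#[link_section = ".maps"]
#[export_name = "PKT_COUNT"]
static PKT_COUNT: Array<u64, 256> = Array::new();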
@ -0,0 +1,41 @@
use core::{cell::UnsafeCell, ptr::NonNull};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_ARRAY, btf_map_def, cty::c_void,
    helpers::bpf_map_lookup_elem,
};

btf_map_def!(ArrayDef, BPF_MAP_TYPE_ARRAY);

#[repr(transparent)]
pub struct Array<T, const M: usize, const F: usize = 0>(UnsafeCell<ArrayDef<u32, T, M, F>>);

unsafe impl<T: Sync, const M: usize, const F: usize> Sync for Array<T, M, F> {}

impl<T, const M: usize, const F: usize> Array<T, M, F> {
    pub const fn new() -> Self {
        Array(UnsafeCell::new(ArrayDef::new()))
    }

    #[inline(always)]
    pub fn get(&self, index: u32) -> Option<&T> {
        // FIXME: alignment
        unsafe { self.lookup(index).map(|p| p.as_ref()) }
    }

    #[inline(always)]
    pub fn get_ptr(&self, index: u32) -> Option<*const T> {
        unsafe { self.lookup(index).map(|p| p.as_ptr() as *const T) }
    }

    #[inline(always)]
    pub fn get_ptr_mut(&self, index: u32) -> Option<*mut T> {
        unsafe { self.lookup(index).map(|p| p.as_ptr()) }
    }

    #[inline(always)]
    unsafe fn lookup(&self, index: u32) -> Option<NonNull<T>> {
        let ptr = bpf_map_lookup_elem(self.0.get() as *mut _, &index as *const _ as *const c_void);
        NonNull::new(ptr as *mut T)
    }
}
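A minimal usage sketch for `Array`; the map name and the meaning of slot 0 are assumptions for illustration:

use aya_ebpf::{bindings::xdp_action, btf_maps::Array, macros::{btf_map, xdp}, programs::XdpContext};

#[btf_map]
static CONFIG: Array<u32, 16> = Array::new();

#[xdp]
fn filter(_ctx: XdpContext) -> u32 {
    // Slot 0 is assumed to hold a drop flag populated from userspace.
    match CONFIG.get(0) {
        Some(&1) => xdp_action::XDP_DROP,
        _ => xdp_action::XDP_PASS,
    }
}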
@ -0,0 +1,60 @@
use core::{cell::UnsafeCell, ptr};

use aya_ebpf_bindings::helpers::{bpf_map_peek_elem, bpf_map_push_elem};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER, btf_maps::AyaBtfMapMarker, cty::c_void,
};

#[allow(dead_code)]
pub struct BloomFilterDef<T, const M: usize, const H: usize = 5, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_BLOOM_FILTER as usize],
    value: *const T,
    max_entries: *const [i32; M],
    map_extra: *const [i32; H],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct BloomFilter<T, const M: usize, const H: usize = 5, const F: usize = 0>(
    UnsafeCell<BloomFilterDef<T, M, H, F>>,
);

impl<T, const M: usize, const H: usize, const F: usize> BloomFilter<T, M, H, F> {
    pub const fn new() -> Self {
        BloomFilter(UnsafeCell::new(BloomFilterDef {
            r#type: &[0i32; BPF_MAP_TYPE_BLOOM_FILTER as usize] as *const _,
            value: ptr::null(),
            max_entries: &[0i32; M] as *const _,
            map_extra: &[0i32; H] as *const _,
            map_flags: &[0i32; F] as *const _,
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    #[inline]
    pub fn contains(&mut self, value: &T) -> Result<(), i64> {
        let ret = unsafe {
            // Pass the map definition pointer itself, not a pointer to it.
            bpf_map_peek_elem(
                self.0.get() as *mut _,
                value as *const _ as *mut c_void,
            )
        };
        (ret == 0).then_some(()).ok_or(ret)
    }

    #[inline]
    pub fn insert(&mut self, value: &T, flags: u64) -> Result<(), i64> {
        let ret = unsafe {
            bpf_map_push_elem(
                self.0.get() as *mut _,
                value as *const _ as *const _,
                flags,
            )
        };
        (ret == 0).then_some(()).ok_or(ret)
    }
}
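A usage sketch for `BloomFilter`. Since `insert` and `contains` take `&mut self` and no `Sync` impl is provided above, the sketch assumes a `static mut` declaration accessed from `unsafe` code:

use aya_ebpf::{btf_maps::BloomFilter, macros::btf_map};

// `static mut` because BloomFilter lacks a `Sync` impl (assumption: this
// matches the intended usage of its `&mut self` methods).
#[btf_map]
static mut SEEN: BloomFilter<u32, 1024> = BloomFilter::new();

fn seen_before(ip: u32) -> bool {
    unsafe {
        // `contains` returns Ok when the value is probably present.
        if SEEN.contains(&ip).is_ok() {
            return true;
        }
        let _ = SEEN.insert(&ip, 0);
        false
    }
}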
@ -0,0 +1,202 @@
use core::{cell::UnsafeCell, ptr::NonNull};

use aya_ebpf_bindings::bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH;

use crate::{
    bindings::bpf_map_type::{BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_LRU_HASH},
    btf_map_def,
    cty::{c_long, c_void},
    helpers::{bpf_map_delete_elem, bpf_map_lookup_elem, bpf_map_update_elem},
};

btf_map_def!(HashMapDef, BPF_MAP_TYPE_HASH);

#[repr(transparent)]
pub struct HashMap<K, V, const M: usize, const F: usize = 0>(UnsafeCell<HashMapDef<K, V, M, F>>);

unsafe impl<K: Sync, V: Sync, const M: usize, const F: usize> Sync for HashMap<K, V, M, F> {}

impl<K, V, const M: usize, const F: usize> HashMap<K, V, M, F> {
    pub const fn new() -> HashMap<K, V, M, F> {
        HashMap(UnsafeCell::new(HashMapDef::new()))
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel makes
    /// no guarantees about the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
    /// to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.0.get() as _, key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.0.get() as _, key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.0.get() as _, key)
    }
}

btf_map_def!(LruHashMapDef, BPF_MAP_TYPE_LRU_HASH);

#[repr(transparent)]
pub struct LruHashMap<K, V, const M: usize, const F: usize = 0>(
    UnsafeCell<LruHashMapDef<K, V, M, F>>,
);

unsafe impl<K: Sync, V: Sync, const M: usize, const F: usize> Sync for LruHashMap<K, V, M, F> {}

impl<K, V, const M: usize, const F: usize> LruHashMap<K, V, M, F> {
    pub const fn new() -> LruHashMap<K, V, M, F> {
        LruHashMap(UnsafeCell::new(LruHashMapDef::new()))
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel makes
    /// no guarantees about the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
    /// to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.0.get() as _, key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.0.get() as _, key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.0.get() as _, key)
    }
}

btf_map_def!(PerCpuHashMapDef, BPF_MAP_TYPE_PERCPU_HASH);

#[repr(transparent)]
pub struct PerCpuHashMap<K, V, const M: usize, const F: usize = 0>(
    UnsafeCell<PerCpuHashMapDef<K, V, M, F>>,
);

unsafe impl<K, V, const M: usize, const F: usize> Sync for PerCpuHashMap<K, V, M, F> {}

impl<K, V, const M: usize, const F: usize> PerCpuHashMap<K, V, M, F> {
    pub const fn new() -> PerCpuHashMap<K, V, M, F> {
        PerCpuHashMap(UnsafeCell::new(PerCpuHashMapDef::new()))
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// This function is unsafe. Unless the map flag `BPF_F_NO_PREALLOC` is used, the kernel makes
    /// no guarantees about the atomicity of `insert` or `remove`, and any element removed from
    /// the map might get aliased by another element in the map, causing garbage to be read, or
    /// corruption in case of writes.
    #[inline]
    pub unsafe fn get(&self, key: &K) -> Option<&V> {
        get(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, but this returns a raw pointer and it's up to the caller
    /// to decide whether it's safe to dereference the pointer or not.
    #[inline]
    pub fn get_ptr(&self, key: &K) -> Option<*const V> {
        get_ptr(self.0.get() as _, key)
    }

    /// Retrieve the value associated with `key` from the map.
    ///
    /// The same caveat as `get` applies, and additionally care should be taken to avoid
    /// concurrent writes, but it's up to the caller to decide whether it's safe to dereference
    /// the pointer or not.
    #[inline]
    pub fn get_ptr_mut(&self, key: &K) -> Option<*mut V> {
        get_ptr_mut(self.0.get() as _, key)
    }

    #[inline]
    pub fn insert(&self, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
        insert(self.0.get() as _, key, value, flags)
    }

    #[inline]
    pub fn remove(&self, key: &K) -> Result<(), c_long> {
        remove(self.0.get() as _, key)
    }
}

#[inline]
fn get_ptr_mut<K, V>(def: *mut c_void, key: &K) -> Option<*mut V> {
    unsafe {
        let value = bpf_map_lookup_elem(def as *mut _, key as *const _ as *const c_void);
        // FIXME: alignment
        NonNull::new(value as *mut V).map(|p| p.as_ptr())
    }
}

#[inline]
fn get_ptr<K, V>(def: *mut c_void, key: &K) -> Option<*const V> {
    get_ptr_mut(def, key).map(|p| p as *const V)
}

#[inline]
unsafe fn get<'a, K, V>(def: *mut c_void, key: &K) -> Option<&'a V> {
    get_ptr(def, key).map(|p| &*p)
}

#[inline]
fn insert<K, V>(def: *mut c_void, key: &K, value: &V, flags: u64) -> Result<(), c_long> {
    let ret = unsafe {
        bpf_map_update_elem(
            def as *mut _,
            key as *const _ as *const _,
            value as *const _ as *const _,
            flags,
        )
    };
    (ret == 0).then_some(()).ok_or(ret)
}

#[inline]
fn remove<K>(def: *mut c_void, key: &K) -> Result<(), c_long> {
    let ret = unsafe { bpf_map_delete_elem(def as *mut _, key as *const _ as *const c_void) };
    (ret == 0).then_some(()).ok_or(ret)
}
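A usage sketch for `HashMap`; names are illustrative:

use aya_ebpf::{btf_maps::HashMap, cty::c_long, macros::btf_map};

#[btf_map]
static PKTS_PER_PID: HashMap<u32, u64, 1024> = HashMap::new();

fn bump(pid: u32) -> Result<(), c_long> {
    // See the aliasing caveats documented on `get` above.
    let next = unsafe { PKTS_PER_PID.get(&pid) }.copied().unwrap_or(0) + 1;
    PKTS_PER_PID.insert(&pid, &next, 0)
}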
@ -0,0 +1,26 @@
// use core::mem;
//
// use crate::bindings::bpf_map_type::BPF_MAP_TYPE_LPM_TRIE;

// #[allow(dead_code)]
// pub struct LpmTrieDef<K, V, const M: usize, const F: usize = 0> {
//     r#type: *const [i32; BPF_MAP_TYPE_LPM_TRIE as usize],
//     key_size: *const [i32; mem::size_of::<Key<K>>()],
//     value_size: *const [i32; mem::size_of::<V>()],
//     max_entries: *const [i32; M],
//     map_flags: *const [i32; F],
// }

#[repr(packed)]
pub struct Key<K> {
    /// Represents the number of bits matched against.
    pub prefix_len: u32,
    /// Represents arbitrary data stored in the LpmTrie.
    pub data: K,
}

impl<K> Key<K> {
    pub fn new(prefix_len: u32, data: K) -> Self {
        Self { prefix_len, data }
    }
}
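A sketch of building an LPM key for an IPv4 prefix. Only `Key` is usable so far, since the trie map definition itself is still commented out above:

use aya_ebpf::btf_maps::lpm_trie::Key;

fn ipv4_prefix_key() -> Key<u32> {
    // 192.168.0.0/24; the address bytes are kept in network byte order.
    Key::new(24, u32::from_be_bytes([192, 168, 0, 0]))
}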
@ -0,0 +1,75 @@
#![cfg(feature = "btf-maps")]

use core::marker::PhantomData;

pub mod array;
pub mod bloom_filter;
pub mod hash_map;
pub mod lpm_trie;
pub mod per_cpu_array;
pub mod perf;
pub mod program_array;
pub mod queue;
pub mod ring_buf;
pub mod sock_hash;
pub mod sock_map;
pub mod stack;
pub mod stack_trace;
pub mod xdp;

pub use array::Array;
pub use bloom_filter::BloomFilter;
pub use hash_map::{HashMap, LruHashMap, PerCpuHashMap};
pub use per_cpu_array::PerCpuArray;
pub use perf::{PerfEventArray, PerfEventByteArray};
pub use program_array::ProgramArray;
pub use queue::Queue;
pub use ring_buf::RingBuf;
pub use sock_hash::SockHash;
pub use sock_map::SockMap;
pub use stack::Stack;
pub use stack_trace::StackTrace;
pub use xdp::{CpuMap, DevMap, DevMapHash, XskMap};

/// A marker used to remove names of annotated types in LLVM debug info and
/// therefore also in BTF.
#[repr(transparent)]
pub(crate) struct AyaBtfMapMarker(PhantomData<()>);

impl AyaBtfMapMarker {
    pub(crate) const fn new() -> Self {
        Self(PhantomData)
    }
}

#[macro_export]
macro_rules! btf_map_def {
    ($name:ident, $t:ident) => {
        #[allow(dead_code)]
        pub struct $name<K, V, const M: usize, const F: usize = 0> {
            r#type: *const [i32; $t as usize],
            key: *const K,
            value: *const V,
            max_entries: *const [i32; M],
            map_flags: *const [i32; F],

            // Anonymize the struct.
            _anon: $crate::btf_maps::AyaBtfMapMarker,
        }

        impl<K, V, const M: usize, const F: usize> $name<K, V, M, F> {
            pub const fn new() -> $name<K, V, M, F> {
                $name {
                    r#type: &[0i32; $t as usize],
                    key: ::core::ptr::null(),
                    value: ::core::ptr::null(),
                    max_entries: &[0i32; M],
                    map_flags: &[0i32; F],
                    _anon: $crate::btf_maps::AyaBtfMapMarker::new(),
                }
            }
        }
    };
}
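For reference, a sketch of what `btf_map_def!(HashMapDef, BPF_MAP_TYPE_HASH)` expands to. Following the libbpf BTF-map convention, integer parameters (map type, max_entries, flags) are encoded in the length of a pointed-to array, and key/value types in the pointee types, so all metadata lives purely in the BTF type information; `new()` then initializes every field with null or dummy pointers:

#[allow(dead_code)]
pub struct HashMapDef<K, V, const M: usize, const F: usize = 0> {
    // The map type id is carried by the array length in BTF.
    r#type: *const [i32; BPF_MAP_TYPE_HASH as usize],
    // Key/value types are carried by the pointee types.
    key: *const K,
    value: *const V,
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],
    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}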
@ -0,0 +1,44 @@
use core::{cell::UnsafeCell, ptr::NonNull};

use aya_ebpf_bindings::helpers::bpf_map_lookup_elem;

use crate::{bindings::bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY, btf_map_def, cty::c_void};

btf_map_def!(PerCpuArrayDef, BPF_MAP_TYPE_PERCPU_ARRAY);

#[repr(transparent)]
pub struct PerCpuArray<T, const M: usize, const F: usize = 0>(
    UnsafeCell<PerCpuArrayDef<u32, T, M, F>>,
);

unsafe impl<T: Sync, const M: usize, const F: usize> Sync for PerCpuArray<T, M, F> {}

impl<T, const M: usize, const F: usize> PerCpuArray<T, M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(PerCpuArrayDef::new()))
    }

    #[inline(always)]
    pub fn get(&self, index: u32) -> Option<&T> {
        unsafe {
            // FIXME: alignment
            self.lookup(index).map(|p| p.as_ref())
        }
    }

    #[inline(always)]
    pub fn get_ptr(&self, index: u32) -> Option<*const T> {
        unsafe { self.lookup(index).map(|p| p.as_ptr() as *const T) }
    }

    #[inline(always)]
    pub fn get_ptr_mut(&self, index: u32) -> Option<*mut T> {
        unsafe { self.lookup(index).map(|p| p.as_ptr()) }
    }

    #[inline(always)]
    unsafe fn lookup(&self, index: u32) -> Option<NonNull<T>> {
        let ptr = bpf_map_lookup_elem(self.0.get() as *mut _, &index as *const _ as *const c_void);
        NonNull::new(ptr as *mut T)
    }
}
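A usage sketch for `PerCpuArray`; each CPU observes its own copy of every slot, so no cross-CPU synchronization is needed:

use aya_ebpf::{btf_maps::PerCpuArray, macros::btf_map};

#[btf_map]
static COUNTERS: PerCpuArray<u64, 1> = PerCpuArray::new();

fn bump() {
    // Pointer into this CPU's copy of slot 0.
    if let Some(counter) = COUNTERS.get_ptr_mut(0) {
        unsafe { *counter += 1 };
    }
}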
@ -0,0 +1,34 @@
use core::mem;

mod perf_event_array;
mod perf_event_byte_array;

pub use perf_event_array::PerfEventArray;
pub use perf_event_byte_array::PerfEventByteArray;

use crate::{bindings::bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY, btf_maps::AyaBtfMapMarker};

#[allow(dead_code)]
pub struct PerfEventArrayDef<const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_PERF_EVENT_ARRAY as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; mem::size_of::<u32>()],
    max_entries: *const [i32; 0],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

impl<const F: usize> PerfEventArrayDef<F> {
    pub const fn new() -> Self {
        Self {
            r#type: &[0i32; BPF_MAP_TYPE_PERF_EVENT_ARRAY as usize],
            key_size: &[0i32; mem::size_of::<u32>()],
            value_size: &[0i32; mem::size_of::<u32>()],
            max_entries: &[0i32; 0],
            map_flags: &[0i32; F],
            _anon: AyaBtfMapMarker::new(),
        }
    }
}
@ -0,0 +1,40 @@
use core::{cell::UnsafeCell, marker::PhantomData, mem};

use crate::{
    bindings::BPF_F_CURRENT_CPU, btf_maps::perf::PerfEventArrayDef, helpers::bpf_perf_event_output,
    EbpfContext,
};

#[repr(transparent)]
pub struct PerfEventArray<T, const F: usize = 0> {
    def: UnsafeCell<PerfEventArrayDef<F>>,
    _t: PhantomData<T>,
}

unsafe impl<T: Sync, const F: usize> Sync for PerfEventArray<T, F> {}

impl<T, const F: usize> PerfEventArray<T, F> {
    pub const fn new() -> Self {
        Self {
            def: UnsafeCell::new(PerfEventArrayDef::new()),
            _t: PhantomData,
        }
    }

    pub fn output<C: EbpfContext>(&self, ctx: &C, data: &T, flags: u32) {
        self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
    }

    pub fn output_at_index<C: EbpfContext>(&self, ctx: &C, index: u32, data: &T, flags: u32) {
        let flags = u64::from(flags) << 32 | u64::from(index);
        unsafe {
            bpf_perf_event_output(
                ctx.as_ptr(),
                self.def.get() as *mut _,
                flags,
                data as *const _ as *mut _,
                mem::size_of::<T>() as u64,
            );
        }
    }
}
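A usage sketch for `PerfEventArray`; the event struct is hypothetical:

use aya_ebpf::{bindings::xdp_action, btf_maps::PerfEventArray, macros::{btf_map, xdp}, programs::XdpContext};

#[repr(C)]
struct PacketLog {
    size: u32,
}

#[btf_map]
static EVENTS: PerfEventArray<PacketLog> = PerfEventArray::new();

#[xdp]
fn log_packets(ctx: XdpContext) -> u32 {
    let event = PacketLog {
        size: (ctx.data_end() - ctx.data()) as u32,
    };
    // `output` emits to the ring of the current CPU (BPF_F_CURRENT_CPU).
    EVENTS.output(&ctx, &event, 0);
    xdp_action::XDP_PASS
}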
@ -0,0 +1,34 @@
use core::cell::UnsafeCell;

use crate::{
    bindings::BPF_F_CURRENT_CPU, btf_maps::perf::PerfEventArrayDef, helpers::bpf_perf_event_output,
    EbpfContext,
};

#[repr(transparent)]
pub struct PerfEventByteArray<const F: usize = 0>(UnsafeCell<PerfEventArrayDef<F>>);

unsafe impl<const F: usize> Sync for PerfEventByteArray<F> {}

impl<const F: usize> PerfEventByteArray<F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(PerfEventArrayDef::new()))
    }

    pub fn output<C: EbpfContext>(&self, ctx: &C, data: &[u8], flags: u32) {
        self.output_at_index(ctx, BPF_F_CURRENT_CPU as u32, data, flags)
    }

    pub fn output_at_index<C: EbpfContext>(&self, ctx: &C, index: u32, data: &[u8], flags: u32) {
        let flags = u64::from(flags) << 32 | u64::from(index);
        unsafe {
            bpf_perf_event_output(
                ctx.as_ptr(),
                self.0.get() as *mut _,
                flags,
                data.as_ptr() as *mut _,
                data.len() as u64,
            );
        }
    }
}
@ -0,0 +1,80 @@
use core::{cell::UnsafeCell, hint::unreachable_unchecked, mem};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_PROG_ARRAY, btf_maps::AyaBtfMapMarker, cty::c_long,
    helpers::bpf_tail_call, EbpfContext,
};

#[allow(dead_code)]
pub struct ProgramArrayDef<const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_PROG_ARRAY as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; mem::size_of::<u32>()],
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct ProgramArray<const M: usize, const F: usize = 0>(UnsafeCell<ProgramArrayDef<M, F>>);

impl<const M: usize, const F: usize> ProgramArray<M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(ProgramArrayDef {
            r#type: &[0i32; BPF_MAP_TYPE_PROG_ARRAY as usize] as *const _,
            key_size: &[0i32; mem::size_of::<u32>()] as *const _,
            value_size: &[0i32; mem::size_of::<u32>()] as *const _,
            max_entries: &[0i32; M] as *const _,
            map_flags: &[0i32; F] as *const _,
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    /// Perform a tail call into a program indexed by this map.
    ///
    /// # Safety
    ///
    /// This function is inherently unsafe, since it causes control flow to jump into
    /// another eBPF program. This can have side effects, such as drop methods not being
    /// called. Note that tail calling into an eBPF program is not the same thing as
    /// a function call -- control flow never returns to the caller.
    ///
    /// # Return Value
    ///
    /// On success, this function **does not return** into the original program.
    /// On failure, a negative error is returned, wrapped in `Err()`.
    #[cfg(not(unstable))]
    pub unsafe fn tail_call<C: EbpfContext>(&self, ctx: &C, index: u32) -> Result<(), c_long> {
        let res = bpf_tail_call(ctx.as_ptr(), self.0.get() as *mut _, index);
        if res != 0 {
            Err(res)
        } else {
            unreachable_unchecked()
        }
    }

    /// Perform a tail call into a program indexed by this map.
    ///
    /// # Safety
    ///
    /// This function is inherently unsafe, since it causes control flow to jump into
    /// another eBPF program. This can have side effects, such as drop methods not being
    /// called. Note that tail calling into an eBPF program is not the same thing as
    /// a function call -- control flow never returns to the caller.
    ///
    /// # Return Value
    ///
    /// On success, this function **does not return** into the original program.
    /// On failure, a negative error is returned, wrapped in `Err()`.
    #[cfg(unstable)]
    pub unsafe fn tail_call<C: EbpfContext>(&self, ctx: &C, index: u32) -> Result<!, c_long> {
        let res = bpf_tail_call(ctx.as_ptr(), self.0.get() as *mut _, index);
        if res != 0 {
            Err(res)
        } else {
            unreachable_unchecked()
        }
    }
}
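A tail-call usage sketch. No `Sync` impl is provided for `ProgramArray` above, so the sketch falls back to a `static mut`; the index is illustrative:

use aya_ebpf::{bindings::xdp_action, btf_maps::ProgramArray, macros::{btf_map, xdp}, programs::XdpContext};

// `static mut` because ProgramArray lacks a `Sync` impl (assumption).
#[btf_map]
static mut PROGS: ProgramArray<4> = ProgramArray::new();

#[xdp]
fn dispatcher(ctx: XdpContext) -> u32 {
    // On success the tail call never returns; on failure we fall through.
    let _ = unsafe { PROGS.tail_call(&ctx, 0) };
    xdp_action::XDP_ABORTED
}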
@ -0,0 +1,50 @@
use core::{cell::UnsafeCell, mem, ptr};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_QUEUE,
    btf_maps::AyaBtfMapMarker,
    helpers::{bpf_map_pop_elem, bpf_map_push_elem},
};

#[allow(dead_code)]
pub struct QueueDef<T, const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_QUEUE as usize],
    value: *const T,
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct Queue<T, const M: usize, const F: usize = 0>(UnsafeCell<QueueDef<T, M, F>>);

unsafe impl<T: Sync, const M: usize, const F: usize> Sync for Queue<T, M, F> {}

impl<T, const M: usize, const F: usize> Queue<T, M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(QueueDef {
            r#type: &[0i32; BPF_MAP_TYPE_QUEUE as usize],
            value: ptr::null(),
            max_entries: &[0i32; M],
            map_flags: &[0i32; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
        let ret = unsafe {
            bpf_map_push_elem(self.0.get() as *mut _, value as *const _ as *const _, flags)
        };
        (ret == 0).then_some(()).ok_or(ret)
    }

    pub fn pop(&self) -> Option<T> {
        unsafe {
            let mut value = mem::MaybeUninit::uninit();
            let ret = bpf_map_pop_elem(self.0.get() as *mut _, value.as_mut_ptr() as *mut _);
            (ret == 0).then_some(value.assume_init())
        }
    }
}
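A usage sketch for `Queue`:

use aya_ebpf::{btf_maps::Queue, macros::btf_map};

#[btf_map]
static BACKLOG: Queue<u32, 1024> = Queue::new();

fn roundtrip(v: u32) -> Option<u32> {
    // `push` fails with the raw error code when the queue is full.
    BACKLOG.push(&v, 0).ok()?;
    BACKLOG.pop()
}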
@ -0,0 +1,159 @@
use core::{
    cell::UnsafeCell,
    mem::{self, MaybeUninit},
    ops::{Deref, DerefMut},
};

#[cfg(unstable)]
mod const_assert {
    pub struct Assert<const COND: bool> {}

    pub trait IsTrue {}

    impl IsTrue for Assert<true> {}
}
#[cfg(unstable)]
use const_assert::{Assert, IsTrue};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_RINGBUF,
    btf_maps::AyaBtfMapMarker,
    helpers::{
        bpf_ringbuf_discard, bpf_ringbuf_output, bpf_ringbuf_query, bpf_ringbuf_reserve,
        bpf_ringbuf_submit,
    },
};

#[allow(dead_code)]
pub struct RingBufDef<const S: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_RINGBUF as usize],
    max_entries: *const [i32; S],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct RingBuf<const S: usize, const F: usize = 0>(UnsafeCell<RingBufDef<S, F>>);

unsafe impl<const S: usize, const F: usize> Sync for RingBuf<S, F> {}

/// A ring buffer entry, returned from [`RingBuf::reserve`].
///
/// You must [`submit`] or [`discard`] this entry before it gets dropped.
///
/// [`submit`]: RingBufEntry::submit
/// [`discard`]: RingBufEntry::discard
#[must_use = "eBPF verifier requires ring buffer entries to be either submitted or discarded"]
pub struct RingBufEntry<T: 'static>(&'static mut MaybeUninit<T>);

impl<T> Deref for RingBufEntry<T> {
    type Target = MaybeUninit<T>;

    fn deref(&self) -> &Self::Target {
        self.0
    }
}

impl<T> DerefMut for RingBufEntry<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.0
    }
}

impl<T> RingBufEntry<T> {
    /// Discard this ring buffer entry. The entry will be skipped by the userspace reader.
    pub fn discard(self, flags: u64) {
        unsafe { bpf_ringbuf_discard(self.0.as_mut_ptr() as *mut _, flags) };
    }

    /// Commit this ring buffer entry. The entry will be made visible to the userspace reader.
    pub fn submit(self, flags: u64) {
        unsafe { bpf_ringbuf_submit(self.0.as_mut_ptr() as *mut _, flags) };
    }
}

impl<const S: usize, const F: usize> RingBuf<S, F> {
    /// Declare an eBPF ring buffer.
    ///
    /// The Linux kernel requires that the size `S` be a power-of-2 multiple of the page size. The
    /// loading program may coerce the size when loading the map.
    pub const fn new() -> Self {
        Self(UnsafeCell::new(RingBufDef {
            r#type: &[0i32; BPF_MAP_TYPE_RINGBUF as usize],
            max_entries: &[0i32; S],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    /// Reserve memory in the ring buffer that can fit `T`.
    ///
    /// Returns `None` if the ring buffer is full.
    #[cfg(unstable)]
    pub fn reserve<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>>
    where
        Assert<{ 8 % mem::align_of::<T>() == 0 }>: IsTrue,
    {
        self.reserve_impl(flags)
    }

    /// Reserve memory in the ring buffer that can fit `T`.
    ///
    /// Returns `None` if the ring buffer is full.
    ///
    /// The kernel will reserve memory at an 8-byte aligned boundary, so `mem::align_of::<T>()`
    /// must be less than or equal to 8. If you use this with a `T` that isn't properly aligned,
    /// this function will be compiled to a panic; depending on your panic_handler, this may make
    /// the eBPF program fail to load, or it may make it have undefined behavior.
    #[cfg(not(unstable))]
    pub fn reserve<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>> {
        assert_eq!(8 % mem::align_of::<T>(), 0);
        self.reserve_impl(flags)
    }

    fn reserve_impl<T: 'static>(&self, flags: u64) -> Option<RingBufEntry<T>> {
        let ptr =
            unsafe { bpf_ringbuf_reserve(self.0.get() as *mut _, mem::size_of::<T>() as _, flags) }
                as *mut MaybeUninit<T>;
        unsafe { ptr.as_mut() }.map(|ptr| RingBufEntry(ptr))
    }

    /// Copy `data` to the ring buffer output.
    ///
    /// Consider using [`reserve`] and [`submit`] if `T` is statically sized and you want to save a
    /// copy from either a map buffer or the stack.
    ///
    /// Unlike [`reserve`], this function can handle dynamically sized types (which are hard to
    /// create in eBPF but still possible, e.g. by slicing an array).
    ///
    /// Note: `T` must be aligned to no more than 8 bytes; it's not possible to fulfill larger
    /// alignment requests. If you use this with a `T` that isn't properly aligned, this function
    /// will be compiled to a panic and silently make your eBPF program fail to load.
    /// See [here](https://github.com/torvalds/linux/blob/3f01e9fed/kernel/bpf/ringbuf.c#L418).
    ///
    /// [`reserve`]: RingBuf::reserve
    /// [`submit`]: RingBufEntry::submit
    pub fn output<T: ?Sized>(&self, data: &T, flags: u64) -> Result<(), i64> {
        assert_eq!(8 % mem::align_of_val(data), 0);
        let ret = unsafe {
            bpf_ringbuf_output(
                self.0.get() as *mut _,
                data as *const _ as *mut _,
                mem::size_of_val(data) as _,
                flags,
            )
        };
        if ret < 0 {
            Err(ret)
        } else {
            Ok(())
        }
    }

    /// Query various information about the ring buffer.
    ///
    /// Consult `bpf_ringbuf_query` documentation for a list of allowed flags.
    pub fn query(&self, flags: u64) -> u64 {
        unsafe { bpf_ringbuf_query(self.0.get() as *mut _, flags) }
    }
}
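A reserve/submit usage sketch for `RingBuf`; 256 KiB is an arbitrary power-of-2 multiple of the page size:

use aya_ebpf::{btf_maps::RingBuf, macros::btf_map};

#[btf_map]
static EVENTS: RingBuf<{ 256 * 1024 }> = RingBuf::new();

fn emit(pid: u32) {
    // Entries must be submitted or discarded before they are dropped.
    if let Some(mut entry) = EVENTS.reserve::<u32>(0) {
        entry.write(pid);
        entry.submit(0);
    }
}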
@ -0,0 +1,98 @@
use core::{borrow::Borrow, cell::UnsafeCell, ptr};

use aya_ebpf_cty::c_void;

use crate::{
    bindings::{bpf_map_type::BPF_MAP_TYPE_SOCKHASH, bpf_sock_ops},
    btf_maps::AyaBtfMapMarker,
    helpers::{
        bpf_map_lookup_elem, bpf_msg_redirect_hash, bpf_sk_assign, bpf_sk_redirect_hash,
        bpf_sk_release, bpf_sock_hash_update,
    },
    programs::{SkBuffContext, SkLookupContext, SkMsgContext},
    EbpfContext,
};

#[allow(dead_code)]
pub struct SockHashDef<K, const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_SOCKHASH as usize],
    key: *const K,
    value: *const u32,
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct SockHash<K, const M: usize, const F: usize = 0>(UnsafeCell<SockHashDef<K, M, F>>);

unsafe impl<K: Sync, const M: usize, const F: usize> Sync for SockHash<K, M, F> {}

impl<K, const M: usize, const F: usize> SockHash<K, M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(SockHashDef {
            r#type: &[0i32; BPF_MAP_TYPE_SOCKHASH as usize],
            key: ptr::null(),
            value: ptr::null(),
            max_entries: &[0i32; M],
            map_flags: &[0i32; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
        let ret = unsafe {
            bpf_sock_hash_update(
                sk_ops as *mut _,
                self.0.get() as *mut _,
                key as *mut _ as *mut c_void,
                flags,
            )
        };
        (ret == 0).then_some(()).ok_or(ret)
    }

    pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
        unsafe {
            bpf_msg_redirect_hash(
                ctx.as_ptr() as *mut _,
                self.0.get() as *mut _,
                key as *mut _ as *mut _,
                flags,
            )
        }
    }

    pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
        unsafe {
            bpf_sk_redirect_hash(
                ctx.as_ptr() as *mut _,
                self.0.get() as *mut _,
                key as *mut _ as *mut _,
                flags,
            )
        }
    }

    pub fn redirect_sk_lookup(
        &mut self,
        ctx: &SkLookupContext,
        key: impl Borrow<K>,
        flags: u64,
    ) -> Result<(), u32> {
        unsafe {
            let sk = bpf_map_lookup_elem(
                &mut self.0 as *mut _ as *mut _,
                &key as *const _ as *const c_void,
            );
            if sk.is_null() {
                return Err(1);
            }
            let ret = bpf_sk_assign(ctx.as_ptr() as *mut _, sk, flags);
            bpf_sk_release(sk);
            (ret == 0).then_some(()).ok_or(1)
        }
    }
}
@ -0,0 +1,91 @@
use core::{cell::UnsafeCell, ptr};

use aya_ebpf_cty::c_void;

use crate::{
    bindings::{bpf_map_type::BPF_MAP_TYPE_SOCKMAP, bpf_sock_ops},
    btf_maps::AyaBtfMapMarker,
    helpers::{
        bpf_map_lookup_elem, bpf_msg_redirect_map, bpf_sk_assign, bpf_sk_redirect_map,
        bpf_sk_release, bpf_sock_map_update,
    },
    programs::{SkBuffContext, SkLookupContext, SkMsgContext},
    EbpfContext,
};

#[allow(dead_code)]
pub struct SockMapDef<const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_SOCKMAP as usize],
    key: *const u32,
    value: *const u32,
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct SockMap<const M: usize, const F: usize = 0>(UnsafeCell<SockMapDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for SockMap<M, F> {}

impl<const M: usize, const F: usize> SockMap<M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(SockMapDef {
            r#type: &[0i32; BPF_MAP_TYPE_SOCKMAP as usize],
            key: ptr::null(),
            value: ptr::null(),
            max_entries: &[0i32; M],
            map_flags: &[0i32; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    pub unsafe fn update(
        &self,
        mut index: u32,
        sk_ops: *mut bpf_sock_ops,
        flags: u64,
    ) -> Result<(), i64> {
        let ret = bpf_sock_map_update(
            sk_ops,
            self.0.get() as *mut _,
            &mut index as *mut _ as *mut c_void,
            flags,
        );
        if ret == 0 {
            Ok(())
        } else {
            Err(ret)
        }
    }

    pub unsafe fn redirect_msg(&self, ctx: &SkMsgContext, index: u32, flags: u64) -> i64 {
        bpf_msg_redirect_map(ctx.as_ptr() as *mut _, self.0.get() as *mut _, index, flags)
    }

    pub unsafe fn redirect_skb(&self, ctx: &SkBuffContext, index: u32, flags: u64) -> i64 {
        bpf_sk_redirect_map(ctx.as_ptr() as *mut _, self.0.get() as *mut _, index, flags)
    }

    pub fn redirect_sk_lookup(
        &mut self,
        ctx: &SkLookupContext,
        index: u32,
        flags: u64,
    ) -> Result<(), u32> {
        unsafe {
            let sk = bpf_map_lookup_elem(
                &mut self.0 as *mut _ as *mut _,
                &index as *const _ as *const c_void,
            );
            if sk.is_null() {
                return Err(1);
            }
            let ret = bpf_sk_assign(ctx.as_ptr() as *mut _, sk, flags);
            bpf_sk_release(sk);
            (ret == 0).then_some(()).ok_or(1)
        }
    }
}
@ -0,0 +1,53 @@
use core::{cell::UnsafeCell, mem, ptr};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_STACK,
    btf_maps::AyaBtfMapMarker,
    helpers::{bpf_map_pop_elem, bpf_map_push_elem},
};

#[allow(dead_code)]
pub struct StackDef<T, const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_STACK as usize],
    value: *const T,
    max_entries: *const [i32; M],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct Stack<T, const M: usize, const F: usize = 0>(UnsafeCell<StackDef<T, M, F>>);

impl<T, const M: usize, const F: usize> Stack<T, M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(StackDef {
            r#type: &[0i32; BPF_MAP_TYPE_STACK as usize],
            value: ptr::null(),
            max_entries: &[0i32; M],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    pub fn push(&mut self, value: &T, flags: u64) -> Result<(), i64> {
        let ret = unsafe {
            bpf_map_push_elem(
                &mut self.0 as *mut _ as *mut _,
                value as *const _ as *const _,
                flags,
            )
        };
        (ret == 0).then_some(()).ok_or(ret)
    }

    pub fn pop(&mut self) -> Option<T> {
        unsafe {
            let mut value = mem::MaybeUninit::uninit();
            let ret = bpf_map_pop_elem(
                &mut self.0 as *mut _ as *mut _,
                value.as_mut_ptr() as *mut _,
            );
            (ret == 0).then_some(value.assume_init())
        }
    }
}
@ -0,0 +1,48 @@
use core::{cell::UnsafeCell, mem};

use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_STACK_TRACE, btf_maps::AyaBtfMapMarker,
    helpers::bpf_get_stackid, EbpfContext,
};

const PERF_MAX_STACK_DEPTH: usize = 127;
const VALUE_SIZE: usize = mem::size_of::<u64>() * PERF_MAX_STACK_DEPTH;

#[allow(dead_code)]
pub struct StackTraceDef<const M: usize, const F: usize = 0> {
    r#type: *const [i32; BPF_MAP_TYPE_STACK_TRACE as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; VALUE_SIZE],
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

#[repr(transparent)]
pub struct StackTrace<const M: usize, const F: usize = 0>(UnsafeCell<StackTraceDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for StackTrace<M, F> {}

impl<const M: usize, const F: usize> StackTrace<M, F> {
    pub const fn new() -> Self {
        Self(UnsafeCell::new(StackTraceDef {
            r#type: &[0i32; BPF_MAP_TYPE_STACK_TRACE as usize],
            key_size: &[0i32; mem::size_of::<u32>()],
            value_size: &[0i32; VALUE_SIZE],
            max_entries: &[0i32; M],
            map_flags: &[0i32; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    pub unsafe fn get_stackid<C: EbpfContext>(&self, ctx: &C, flags: u64) -> Result<i64, i64> {
        let ret = bpf_get_stackid(ctx.as_ptr(), self.0.get() as *mut _, flags);
        if ret < 0 {
            Err(ret)
        } else {
            Ok(ret)
        }
    }
}
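A usage sketch for `StackTrace`; a flags value of 0 captures the current (kernel) stack:

use aya_ebpf::{btf_maps::StackTrace, macros::btf_map, EbpfContext};

#[btf_map]
static STACKS: StackTrace<1024> = StackTrace::new();

fn record_stack<C: EbpfContext>(ctx: &C) -> Option<i64> {
    // The returned id keys into the map; userspace reads the frames from it.
    unsafe { STACKS.get_stackid(ctx, 0) }.ok()
}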
@ -0,0 +1,92 @@
use core::{cell::UnsafeCell, mem};

use aya_ebpf_bindings::bindings::bpf_cpumap_val;

use super::try_redirect_map;
use crate::bindings::bpf_map_type::BPF_MAP_TYPE_CPUMAP;

#[allow(dead_code)]
pub struct CpuMapDef<const M: usize, const F: usize> {
    r#type: *const [i32; BPF_MAP_TYPE_CPUMAP as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; mem::size_of::<bpf_cpumap_val>()],
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],
}

/// An array of available CPUs.
///
/// XDP programs can use this map to redirect packets to a target CPU for processing.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.15.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_ebpf::{bindings::xdp_action, btf_maps::CpuMap, macros::{btf_map, xdp}, programs::XdpContext};
///
/// #[btf_map]
/// static MAP: CpuMap<8> = CpuMap::new();
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
///     // Redirect to CPU 7 or drop packet if no entry found.
///     MAP.redirect(7, xdp_action::XDP_DROP as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[repr(transparent)]
pub struct CpuMap<const M: usize, const F: usize = 0>(UnsafeCell<CpuMapDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for CpuMap<M, F> {}

impl<const M: usize, const F: usize> CpuMap<M, F> {
    /// Creates a [`CpuMap`] with a set maximum number of elements.
    ///
    /// In a CPU map, an entry represents a CPU core. Thus there should be as many entries as there
    /// are CPU cores on the system. `max_entries` can be set to zero here, and updated by userspace
    /// at runtime. Refer to the userspace documentation for more information.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{macros::btf_map, btf_maps::CpuMap};
    ///
    /// #[btf_map]
    /// static MAP: CpuMap<8, 0> = CpuMap::new();
    /// ```
    pub const fn new() -> Self {
        Self(UnsafeCell::new(CpuMapDef {
            r#type: &[0i32; BPF_MAP_TYPE_CPUMAP as usize],
            key_size: &[0i32; mem::size_of::<u32>()],
            value_size: &[0i32; mem::size_of::<bpf_cpumap_val>()],
            max_entries: &[0i32; M],
            map_flags: &[0i32; F],
        }))
    }

    /// Redirects the current packet on the CPU at `index`.
    ///
    /// The lower two bits of `flags` are used for the return code if the map lookup fails, which
    /// can be used as the XDP program's return code if a CPU cannot be found.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{bindings::xdp_action, btf_maps::CpuMap, macros::{btf_map, xdp}, programs::XdpContext};
    ///
    /// #[btf_map]
    /// static MAP: CpuMap<8> = CpuMap::new();
    ///
    /// #[xdp]
    /// fn xdp(_ctx: XdpContext) -> u32 {
    ///     // Redirect to CPU 7 or drop packet if no entry found.
    ///     MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
    /// }
    /// ```
    #[inline(always)]
    pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
        try_redirect_map(&self.0, index, flags)
    }
}
@ -0,0 +1,138 @@
use core::{cell::UnsafeCell, mem, num::NonZeroU32, ptr::NonNull};

use aya_ebpf_bindings::bindings::bpf_devmap_val;
use aya_ebpf_cty::c_void;

use super::try_redirect_map;
use crate::{
    bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP, btf_maps::AyaBtfMapMarker,
    helpers::bpf_map_lookup_elem,
};

#[allow(dead_code)]
pub struct DevMapDef<const M: usize, const F: usize> {
    r#type: *const [i32; BPF_MAP_TYPE_DEVMAP as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; mem::size_of::<bpf_devmap_val>()],
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],

    // Anonymize the struct.
    _anon: AyaBtfMapMarker,
}

/// An array of network devices.
///
/// XDP programs can use this map to redirect packets to other network devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.14.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_ebpf::{
///     bindings::xdp_action,
///     btf_maps::DevMap,
///     macros::{btf_map, xdp},
///     programs::XdpContext,
/// };
///
/// #[btf_map]
/// static MAP: DevMap<1> = DevMap::new();
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
///     MAP.redirect(0, xdp_action::XDP_PASS as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[repr(transparent)]
pub struct DevMap<const M: usize, const F: usize = 0>(UnsafeCell<DevMapDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for DevMap<M, F> {}

impl<const M: usize, const F: usize> DevMap<M, F> {
    /// Creates a [`DevMap`] with a set maximum number of elements.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{macros::btf_map, btf_maps::DevMap};
    ///
    /// #[btf_map]
    /// static MAP: DevMap<8> = DevMap::new();
    /// ```
    pub const fn new() -> Self {
        Self(UnsafeCell::new(DevMapDef {
            r#type: &[0; BPF_MAP_TYPE_DEVMAP as usize],
            key_size: &[0; mem::size_of::<u32>()],
            value_size: &[0; mem::size_of::<bpf_devmap_val>()],
            max_entries: &[0; M],
            map_flags: &[0; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    /// Retrieves the interface index at `index` in the array.
    ///
    /// To actually redirect a packet, see [`DevMap::redirect`].
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{macros::btf_map, btf_maps::DevMap};
    ///
    /// #[btf_map]
    /// static MAP: DevMap<1> = DevMap::new();
    ///
    /// let target_if_index = MAP.get(0).unwrap().if_index;
    ///
    /// // redirect to if_index
    /// ```
    #[inline(always)]
    pub fn get(&self, index: u32) -> Option<DevMapValue> {
        unsafe {
            let value =
                bpf_map_lookup_elem(self.0.get() as *mut _, &index as *const _ as *const c_void);
            NonNull::new(value as *mut bpf_devmap_val).map(|p| DevMapValue {
                if_index: p.as_ref().ifindex,
                // SAFETY: map writes use fd, map reads use id.
                // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/bpf.h#L6136
                prog_id: NonZeroU32::new(p.as_ref().bpf_prog.id),
            })
        }
    }

    /// Redirects the current packet on the interface at `index`.
    ///
    /// The lower two bits of `flags` are used for the return code if the map lookup fails, which
    /// can be used as the XDP program's return code if the target device cannot be found.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{bindings::xdp_action, btf_maps::DevMap, macros::{btf_map, xdp}, programs::XdpContext};
    ///
    /// #[btf_map]
    /// static MAP: DevMap<8> = DevMap::new();
    ///
    /// #[xdp]
    /// fn xdp(_ctx: XdpContext) -> u32 {
    ///     MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
    /// }
    /// ```
    #[inline(always)]
    pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
        try_redirect_map(&self.0, index, flags)
    }
}

#[derive(Clone, Copy)]
/// The value of a device map.
pub struct DevMapValue {
    /// Target interface index to redirect to.
    pub if_index: u32,
    /// Chained XDP program ID.
    pub prog_id: Option<NonZeroU32>,
}
@ -0,0 +1,119 @@
use core::{cell::UnsafeCell, mem, num::NonZeroU32, ptr::NonNull};

use aya_ebpf_bindings::bindings::bpf_devmap_val;
use aya_ebpf_cty::c_void;

use super::{dev_map::DevMapValue, try_redirect_map};
use crate::{bindings::bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH, helpers::bpf_map_lookup_elem};

#[allow(dead_code)]
pub struct DevMapHashDef<const M: usize, const F: usize> {
    r#type: *const [i32; BPF_MAP_TYPE_DEVMAP_HASH as usize],
    key_size: *const [i32; mem::size_of::<u32>()],
    value_size: *const [i32; mem::size_of::<bpf_devmap_val>()],
    max_entries: *const [i32; M],
    map_flags: *const [i32; F],
}

/// A map of network devices.
///
/// XDP programs can use this map to redirect packets to other network devices. It is similar to
/// [`DevMap`](super::DevMap), but is a hash map rather than an array. Keys do not need to be
/// contiguous nor start at zero, but there is a hashing cost to every lookup.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 5.4.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_ebpf::{bindings::xdp_action, btf_maps::DevMapHash, macros::{btf_map, xdp}, programs::XdpContext};
///
/// #[btf_map]
/// static MAP: DevMapHash<1> = DevMapHash::new();
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
///     MAP.redirect(42, xdp_action::XDP_PASS as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[repr(transparent)]
pub struct DevMapHash<const M: usize, const F: usize = 0>(UnsafeCell<DevMapHashDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for DevMapHash<M, F> {}

impl<const M: usize, const F: usize> DevMapHash<M, F> {
    /// Creates a [`DevMapHash`] with a set maximum number of elements.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{btf_maps::DevMapHash, macros::btf_map};
    ///
    /// #[btf_map]
    /// static MAP: DevMapHash<8> = DevMapHash::new();
    /// ```
    pub const fn new() -> Self {
        Self(UnsafeCell::new(DevMapHashDef {
            r#type: &[0; BPF_MAP_TYPE_DEVMAP_HASH as usize],
            key_size: &[0; mem::size_of::<u32>()],
            value_size: &[0; mem::size_of::<bpf_devmap_val>()],
            max_entries: &[0; M],
            map_flags: &[0; F],
        }))
    }

    /// Retrieves the interface index with `key` in the map.
    ///
    /// To actually redirect a packet, see [`DevMapHash::redirect`].
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{btf_maps::DevMapHash, macros::btf_map};
    ///
    /// #[btf_map]
    /// static MAP: DevMapHash<1> = DevMapHash::new();
    ///
    /// let target_if_index = MAP.get(42).unwrap().if_index;
    ///
    /// // redirect to if_index
    /// ```
    #[inline(always)]
    pub fn get(&self, key: u32) -> Option<DevMapValue> {
        unsafe {
            let value =
                bpf_map_lookup_elem(self.0.get() as *mut _, &key as *const _ as *const c_void);
            NonNull::new(value as *mut bpf_devmap_val).map(|p| DevMapValue {
                if_index: p.as_ref().ifindex,
                // SAFETY: map writes use fd, map reads use id.
                // https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/bpf.h#L6136
                prog_id: NonZeroU32::new(p.as_ref().bpf_prog.id),
            })
        }
    }

    /// Redirects the current packet on the interface at `key`.
    ///
    /// The lower two bits of `flags` are used for the return code if the map lookup fails, which
    /// can be used as the XDP program's return code if the target device cannot be found.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{bindings::xdp_action, btf_maps::DevMapHash, macros::{btf_map, xdp}, programs::XdpContext};
    ///
    /// #[btf_map]
    /// static MAP: DevMapHash<8> = DevMapHash::new();
    ///
    /// #[xdp]
    /// fn xdp(_ctx: XdpContext) -> u32 {
    ///     MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
    /// }
    /// ```
    #[inline(always)]
    pub fn redirect(&self, key: u32, flags: u64) -> Result<u32, u32> {
        try_redirect_map(&self.0, key, flags)
    }
}
@ -0,0 +1,30 @@
mod cpu_map;
mod dev_map;
mod dev_map_hash;
mod xsk_map;

use core::cell::UnsafeCell;

use aya_ebpf_bindings::{bindings::xdp_action::XDP_REDIRECT, helpers::bpf_redirect_map};
pub use cpu_map::CpuMap;
pub use dev_map::DevMap;
pub use dev_map_hash::DevMapHash;
pub use xsk_map::XskMap;

/// Wrapper around the `bpf_redirect_map` function.
///
/// # Return value
///
/// - `Ok(XDP_REDIRECT)` on success.
/// - `Err` carrying the lowest two bits of `flags` on failure.
#[inline(always)]
fn try_redirect_map<T>(def: &UnsafeCell<T>, key: u32, flags: u64) -> Result<u32, u32> {
    // The helper returns XDP_REDIRECT on success, or the value of the two lower bits of the
    // flags argument on error; it nevertheless returns a long (i64), hence the unsigned_abs.
    let ret = unsafe { bpf_redirect_map(def.get() as *mut _, key.into(), flags) };
    match ret.unsigned_abs() as u32 {
        XDP_REDIRECT => Ok(XDP_REDIRECT),
        ret => Err(ret),
    }
}
@ -0,0 +1,142 @@
|
||||
use core::{cell::UnsafeCell, mem, ptr::NonNull};
|
||||
|
||||
use aya_ebpf_bindings::bindings::bpf_xdp_sock;
|
||||
use aya_ebpf_cty::c_void;
|
||||
|
||||
use crate::{
|
||||
bindings::bpf_map_type::BPF_MAP_TYPE_XSKMAP,
|
||||
btf_maps::{xdp::try_redirect_map, AyaBtfMapMarker},
|
||||
helpers::bpf_map_lookup_elem,
|
||||
};
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct XskMapDef<const M: usize, const F: usize> {
|
||||
r#type: *const [i32; BPF_MAP_TYPE_XSKMAP as usize],
|
||||
key_size: *const [i32; mem::size_of::<u32>()],
|
||||
value_size: *const [i32; mem::size_of::<u32>()],
|
||||
max_entries: *const [i32; M],
|
||||
map_flags: *const [i32; F],
|
||||
|
||||
// Anonymize the struct.
|
||||
_anon: AyaBtfMapMarker,
|
||||
}
|
||||
|
||||
/// An array of AF_XDP sockets.
///
/// XDP programs can use this map to redirect packets to a target AF_XDP socket using the
/// `XDP_REDIRECT` action.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.18.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_ebpf::{bindings::xdp_action, btf_maps::XskMap, macros::{btf_map, xdp}, programs::XdpContext};
///
/// #[btf_map]
/// static SOCKS: XskMap<8> = XskMap::new();
///
/// #[xdp]
/// fn xdp(ctx: XdpContext) -> u32 {
///     let queue_id = unsafe { (*ctx.ctx).rx_queue_index };
///     SOCKS.redirect(queue_id, xdp_action::XDP_DROP as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
///
/// # Queue management
///
/// Packets received on an RX queue can only be redirected to sockets bound to the same queue.
/// Most hardware NICs have multiple RX queues to spread the load across multiple CPU cores using
/// RSS.
///
/// Three strategies are possible:
///
/// - Reduce the RX queue count to a single one. This option is great for development, but is
///   detrimental for performance as the single CPU core receiving packets will get overwhelmed.
///   Setting the queue count for a NIC can be achieved using `ethtool -L <ifname> combined 1`.
/// - Create a socket for every RX queue. Most modern NICs will have an RX queue per CPU thread,
///   so a socket per CPU thread is best for performance. To dynamically size the map depending on
///   the receive queue count, see the userspace documentation of `CpuMap`.
/// - Create a single socket and use a [`CpuMap`](super::CpuMap) to redirect the packet to the
///   correct CPU core. This way, the packet is sent to another CPU, and a chained XDP program can
///   then redirect to the AF_XDP socket (see the sketch below). Using a single socket simplifies
///   the userspace code, but will not perform well unless only a small share of the traffic is
///   redirected to the socket. Regular traffic, however, is not impacted, unlike with a reduced
///   queue count.
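///
/// As a minimal sketch of the third strategy (assuming a single-entry `XskMap` and a first XDP
/// program that redirects through a `CpuMap`), a chained program attached with the `cpumap`
/// attach type forwards the packet to the socket at index 0:
///
/// ```rust,no_run
/// use aya_ebpf::{bindings::xdp_action, btf_maps::XskMap, macros::{btf_map, xdp}, programs::XdpContext};
///
/// #[btf_map]
/// static SOCK: XskMap<1> = XskMap::new();
///
/// // Runs on the CPU that the first XDP program redirected the packet to via a CpuMap.
/// #[xdp(map = "cpumap")]
/// fn redirect_to_sock(_ctx: XdpContext) -> u32 {
///     SOCK.redirect(0, 0).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```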
#[repr(transparent)]
pub struct XskMap<const M: usize, const F: usize = 0>(UnsafeCell<XskMapDef<M, F>>);

unsafe impl<const M: usize, const F: usize> Sync for XskMap<M, F> {}

impl<const M: usize, const F: usize> XskMap<M, F> {
    /// Creates an [`XskMap`] with a set maximum number of elements.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{btf_maps::XskMap, macros::btf_map};
    ///
    /// #[btf_map]
    /// static SOCKS: XskMap<8> = XskMap::new();
    /// ```
    pub const fn new() -> Self {
        Self(UnsafeCell::new(XskMapDef {
            r#type: &[0; BPF_MAP_TYPE_XSKMAP as usize],
            key_size: &[0; mem::size_of::<u32>()],
            value_size: &[0; mem::size_of::<u32>()],
            max_entries: &[0; M],
            map_flags: &[0; F],
            _anon: AyaBtfMapMarker::new(),
        }))
    }

    /// Retrieves the id of the queue to which the socket at `index` in the array is bound.
    ///
    /// To actually redirect a packet, see [`XskMap::redirect`].
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{btf_maps::XskMap, macros::btf_map};
    ///
    /// #[btf_map]
    /// static SOCKS: XskMap<8> = XskMap::new();
    ///
    /// let queue_id = SOCKS.get(0);
    /// ```
    #[inline(always)]
    pub fn get(&self, index: u32) -> Option<u32> {
        unsafe {
            let value =
                bpf_map_lookup_elem(self.0.get() as *mut _, &index as *const _ as *const c_void);
            NonNull::new(value as *mut bpf_xdp_sock).map(|p| p.as_ref().queue_id)
        }
    }

    /// Redirects the current packet to the AF_XDP socket at `index`.
    ///
    /// The lower two bits of `flags` are used for the return code if the map lookup fails, which
    /// can be used as the XDP program's return code if a matching socket cannot be found.
    ///
    /// However, if the socket at `index` is bound to an RX queue which is not the current RX
    /// queue, the packet will be dropped.
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use aya_ebpf::{bindings::xdp_action, btf_maps::XskMap, macros::{btf_map, xdp}, programs::XdpContext};
    ///
    /// #[btf_map]
    /// static SOCKS: XskMap<8> = XskMap::new();
    ///
    /// #[xdp]
    /// fn xdp(ctx: XdpContext) -> u32 {
    ///     let queue_id = unsafe { (*ctx.ctx).rx_queue_index };
    ///     SOCKS.redirect(queue_id, 0).unwrap_or(xdp_action::XDP_DROP)
    /// }
    /// ```
    #[inline(always)]
    pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
        try_redirect_map(&self.0, index, flags)
    }
}
@ -0,0 +1,5 @@
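# BTF maps need debug info in the eBPF object file and BTF emitted by bpf-linker, hence
# `-C debuginfo=2` and the `--btf` linker flag below.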
[target.bpfeb-unknown-none]
rustflags = "-C debuginfo=2 -C link-arg=--btf"

[target.bpfel-unknown-none]
rustflags = "-C debuginfo=2 -C link-arg=--btf"
@ -0,0 +1,74 @@
#![no_std]
#![no_main]

use aya_ebpf::{
    cty::c_long,
    macros::{map, uprobe},
    maps::{array::Array, hash_map::HashMap},
    programs::ProbeContext,
};

#[map]
static HASH_MAP: HashMap<u32, u32> = HashMap::with_max_entries(10, 0);

#[map]
static RESULT: Array<u32> = Array::with_max_entries(1, 0);

#[uprobe]
pub fn hash_map_insert(ctx: ProbeContext) {
    match try_hash_map_insert(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_insert(ctx: ProbeContext) -> Result<(), c_long> {
    let key: u32 = ctx.arg(0).ok_or(1)?;
    let value: u32 = ctx.arg(1).ok_or(1)?;

    HASH_MAP.insert(&key, &value, 0)?;

    Ok(())
}

#[uprobe]
pub fn hash_map_get(ctx: ProbeContext) {
    match try_hash_map_get(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_get(ctx: ProbeContext) -> Result<(), c_long> {
    // Retrieve the value from the map.
    let key: u32 = ctx.arg(0).ok_or(1)?;
    let res = unsafe { HASH_MAP.get(&key).ok_or(1)? };

    // Save it in the array.
    let ptr = RESULT.get_ptr_mut(0).ok_or(1)?;
    unsafe { *ptr = *res };

    Ok(())
}

#[uprobe]
pub fn hash_map_remove(ctx: ProbeContext) {
    match try_hash_map_remove(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_remove(ctx: ProbeContext) -> Result<(), c_long> {
    let key: u32 = ctx.arg(0).ok_or(1)?;

    HASH_MAP.remove(&key)?;

    Ok(())
}

#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}
@ -0,0 +1,74 @@
#![no_std]
#![no_main]

use aya_ebpf::{
    btf_maps::{array::Array, hash_map::HashMap},
    cty::c_long,
    macros::{btf_map, uprobe},
    programs::ProbeContext,
};

#[btf_map]
static HASH_MAP: HashMap<u32, u32, 10> = HashMap::new();

#[btf_map]
static RESULT: Array<u32, 1> = Array::new();

#[uprobe]
pub fn hash_map_insert(ctx: ProbeContext) {
    match try_hash_map_insert(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_insert(ctx: ProbeContext) -> Result<(), c_long> {
    let key: u32 = ctx.arg(0).ok_or(1)?;
    let value: u32 = ctx.arg(1).ok_or(1)?;

    HASH_MAP.insert(&key, &value, 0)?;

    Ok(())
}

#[uprobe]
pub fn hash_map_get(ctx: ProbeContext) {
    match try_hash_map_get(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_get(ctx: ProbeContext) -> Result<(), c_long> {
    // Retrieve the value from the map.
    let key: u32 = ctx.arg(0).ok_or(1)?;
    let res = unsafe { HASH_MAP.get(&key).ok_or(1)? };

    // Save it in the array.
    let ptr = RESULT.get_ptr_mut(0).ok_or(1)?;
    unsafe { *ptr = *res };

    Ok(())
}

#[uprobe]
pub fn hash_map_remove(ctx: ProbeContext) {
    match try_hash_map_remove(ctx) {
        Ok(_) => {}
        Err(_) => {}
    }
}

fn try_hash_map_remove(ctx: ProbeContext) -> Result<(), c_long> {
    let key: u32 = ctx.arg(0).ok_or(1)?;

    HASH_MAP.remove(&key)?;

    Ok(())
}

#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}
@ -0,0 +1,73 @@
#![no_std]
#![no_main]

use aya_ebpf::{
    bindings::xdp_action,
    btf_maps::{Array, CpuMap, DevMap, DevMapHash, XskMap},
    macros::{btf_map, xdp},
    programs::XdpContext,
};

#[btf_map]
static SOCKS: XskMap<1> = XskMap::new();
#[btf_map]
static DEVS: DevMap<1> = DevMap::new();
#[btf_map]
static DEVS_HASH: DevMapHash<1> = DevMapHash::new();
#[btf_map]
static CPUS: CpuMap<1> = CpuMap::new();

/// Hit counters for the probes, used to test program chaining through CpuMap/DevMap.
/// The first slot counts how many times the "raw" XDP program was executed, while the second slot
/// counts how many times the map programs were executed.
/// This allows the test harness to assert that a specific step was executed.
#[btf_map]
static HITS: Array<u32, 2> = Array::new();

#[xdp]
pub fn redirect_sock(_ctx: XdpContext) -> u32 {
    SOCKS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}

#[xdp]
pub fn redirect_dev(_ctx: XdpContext) -> u32 {
    inc_hit(0);
    DEVS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}

#[xdp]
pub fn redirect_dev_hash(_ctx: XdpContext) -> u32 {
    inc_hit(0);
    DEVS_HASH.redirect(10, 0).unwrap_or(xdp_action::XDP_ABORTED)
}

#[xdp]
pub fn redirect_cpu(_ctx: XdpContext) -> u32 {
    inc_hit(0);
    CPUS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}

#[xdp(map = "cpumap")]
pub fn redirect_cpu_chain(_ctx: XdpContext) -> u32 {
    inc_hit(1);
    xdp_action::XDP_PASS
}

#[xdp(map = "devmap")]
pub fn redirect_dev_chain(_ctx: XdpContext) -> u32 {
    inc_hit(1);
    xdp_action::XDP_PASS
}

#[inline(always)]
fn inc_hit(index: u32) {
    if let Some(hit) = HITS.get_ptr_mut(index) {
        unsafe { *hit += 1 };
    }
}

#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}
@ -0,0 +1,59 @@
#![no_std]
#![no_main]

use aya_ebpf::{
    btf_maps::{PerCpuArray, RingBuf},
    macros::{btf_map, uprobe},
    programs::ProbeContext,
};

#[btf_map]
static RING_BUF: RingBuf<0> = RingBuf::new();

// This structure's definition is duplicated in userspace.
#[repr(C)]
struct Registers {
    dropped: u64,
    rejected: u64,
}

// Use a PerCpuArray to store the registers so that we can update the values from multiple CPUs
// without needing synchronization. Atomics exist [1], but aren't exposed.
//
// [1]: https://lwn.net/Articles/838884/
#[btf_map]
static REGISTERS: PerCpuArray<Registers, 1> = PerCpuArray::new();

#[uprobe]
pub fn ring_buf_test(ctx: ProbeContext) {
    let Registers { dropped, rejected } = match REGISTERS.get_ptr_mut(0) {
        Some(regs) => unsafe { &mut *regs },
        None => return,
    };
    let mut entry = match RING_BUF.reserve::<u64>(0) {
        Some(entry) => entry,
        None => {
            *dropped += 1;
            return;
        }
    };
    // Write the function's first argument back out to RING_BUF if it is even, otherwise
    // increment the `rejected` counter. This exercises discarding data.
    let arg: u64 = match ctx.arg(0) {
        Some(arg) => arg,
        None => return,
    };
    if arg % 2 == 0 {
        entry.write(arg);
        entry.submit(0);
    } else {
        *rejected += 1;
        entry.discard(0);
    }
}

#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    loop {}
}
@ -0,0 +1,82 @@
use aya::{
    maps::{Array, HashMap, MapError},
    programs::UProbe,
    Ebpf,
};

#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_hash_map_insert(_key: u32, _value: u32) {
    core::hint::black_box(trigger_hash_map_insert);
}

#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_hash_map_get(_key: u32) {
    core::hint::black_box(trigger_hash_map_get);
}

#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_hash_map_remove(_key: u32) {
    core::hint::black_box(trigger_hash_map_remove);
}

#[test_case::test_case(crate::MAPS; "legacy maps")]
#[test_case::test_case(crate::MAPS_BTF; "BTF maps")]
fn test_hash_map(prog: &[u8]) {
    let mut ebpf = Ebpf::load(prog).unwrap();

    {
        let insert_prog: &mut UProbe = ebpf
            .program_mut("hash_map_insert")
            .unwrap()
            .try_into()
            .unwrap();
        insert_prog.load().unwrap();
        insert_prog
            .attach(Some("trigger_hash_map_insert"), 0, "/proc/self/exe", None)
            .unwrap();

        trigger_hash_map_insert(69, 420);

        let hash_map: HashMap<_, u32, u32> =
            HashMap::try_from(ebpf.map_mut("HASH_MAP").unwrap()).unwrap();
        let value = hash_map.get(&69, 0).unwrap();
        assert_eq!(value, 420);
    }
    {
        let get_prog: &mut UProbe = ebpf
            .program_mut("hash_map_get")
            .unwrap()
            .try_into()
            .unwrap();
        get_prog.load().unwrap();
        get_prog
            .attach(Some("trigger_hash_map_get"), 0, "/proc/self/exe", None)
            .unwrap();

        trigger_hash_map_get(69);

        let results: Array<_, u32> = Array::try_from(ebpf.map_mut("RESULT").unwrap()).unwrap();
        let value = results.get(&0, 0).unwrap();
        assert_eq!(value, 420);
    }
    {
        let remove_prog: &mut UProbe = ebpf
            .program_mut("hash_map_remove")
            .unwrap()
            .try_into()
            .unwrap();
        remove_prog.load().unwrap();
        remove_prog
            .attach(Some("trigger_hash_map_remove"), 0, "/proc/self/exe", None)
            .unwrap();

        trigger_hash_map_remove(69);
        let hash_map: HashMap<_, u32, u32> =
            HashMap::try_from(ebpf.map_mut("HASH_MAP").unwrap()).unwrap();
        let res = hash_map.get(&69, 0);
        assert!(matches!(res.err(), Some(MapError::KeyNotFound)));
    }
}