Merge branch 'main' into new_sarg

pull/773/head
pdliyan 2 years ago
commit 5ed278a766

@ -236,7 +236,7 @@ jobs:
# linux-image-5.10.0-23-cloud-amd64-unsigned_5.10.179-3_amd64.deb \
printf '%s\0' \
linux-image-6.1.0-10-cloud-amd64-unsigned_6.1.38-2_amd64.deb \
linux-image-6.4.0-3-cloud-amd64-unsigned_6.4.11-1_amd64.deb \
linux-image-6.4.0-4-cloud-amd64-unsigned_6.4.13-1_amd64.deb \
| xargs -0 -t -P0 -I {} wget -nd -nv -P test/.tmp/debian-kernels/amd64 ftp://ftp.us.debian.org/debian/pool/main/l/linux/{}
- name: Extract debian kernels

@ -57,13 +57,13 @@ aya-log-common = { path = "aya-log-common", version = "0.1.13", default-features
aya-log-parser = { path = "aya-log-parser", default-features = false }
aya-obj = { path = "aya-obj", version = "0.1.0", default-features = false }
aya-tool = { path = "aya-tool", default-features = false }
bindgen = { version = "0.66", default-features = false }
bindgen = { version = "0.68", default-features = false }
bitflags = { version = "2.2.1", default-features = false }
bytes = { version = "1", default-features = false }
cargo_metadata = { version = "0.17.0", default-features = false }
cargo_metadata = { version = "0.18.0", default-features = false }
clap = { version = "4", default-features = false }
core-error = { version = "0.0.0", default-features = false }
dialoguer = { version = "0.10", default-features = false }
dialoguer = { version = "0.11", default-features = false }
diff = { version = "0.1.13", default-features = false }
env_logger = { version = "0.10", default-features = false }
hashbrown = { version = "0.14", default-features = false }

@ -2,14 +2,15 @@
[![Crates.io][crates-badge]][crates-url]
![License][license-badge]
![Build status][build-badge]
[![Build status][build-badge]][build-url]
[![Book][book-badge]][book-url]
[crates-badge]: https://img.shields.io/crates/v/aya.svg?style=for-the-badge&logo=rust
[crates-url]: https://crates.io/crates/aya
[license-badge]: https://img.shields.io/badge/license-MIT%2FApache--2.0-blue?style=for-the-badge
[build-badge]: https://img.shields.io/github/actions/workflow/status/aya-rs/aya/build-aya.yml?branch=main&style=for-the-badge
[build-badge]: https://img.shields.io/github/actions/workflow/status/aya-rs/aya/ci.yml?style=for-the-badge
[build-url]: https://github.com/aya-rs/aya/actions/workflows/ci.yml
[book-badge]: https://img.shields.io/badge/read%20the-book-9cf.svg?style=for-the-badge&logo=mdbook
[book-url]: https://aya-rs.dev/book

@ -128,6 +128,28 @@ pub fn sk_msg(attrs: TokenStream, item: TokenStream) -> TokenStream {
}
}
/// Marks a function as an eBPF XDP program that can be attached to a network interface.
///
/// On some NIC drivers, XDP probes are compatible with jumbo frames through the use of
/// multi-buffer packets. Programs can opt in to this support by passing the `frags` argument.
///
/// XDP programs can also be chained through the use of CPU maps and dev maps, but must opt in
/// with the `map = "cpumap"` or `map = "devmap"` arguments.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.8.
///
/// # Examples
///
/// ```no_run
/// use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};
///
/// #[xdp(frags)]
/// pub fn xdp(ctx: XdpContext) -> u32 {
/// XDP_PASS
/// }
/// ```
#[proc_macro_error]
#[proc_macro_attribute]
pub fn xdp(attrs: TokenStream, item: TokenStream) -> TokenStream {
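
For reference, a kernel-space program opting into the new `map` argument would look like the minimal sketch below (the program name `redirect_target` is illustrative, not part of this change); as the macro tests further down show, it expands to the `xdp/cpumap` link section.

```rust
use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};

// Loaded with the BPF_XDP_CPUMAP attach type and run on the CPU a packet was
// redirected to, rather than on the CPU that received it.
#[xdp(map = "cpumap")]
pub fn redirect_target(ctx: XdpContext) -> u32 {
    XDP_PASS
}
```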

@ -1,31 +1,52 @@
use std::borrow::Cow;
use proc_macro2::TokenStream;
use quote::quote;
use syn::{ItemFn, Result};
use syn::{Error, ItemFn, Result};
use crate::args::{err_on_unknown_args, pop_bool_arg, Args};
use crate::args::{err_on_unknown_args, pop_bool_arg, pop_string_arg, Args};
pub(crate) struct Xdp {
item: ItemFn,
frags: bool,
map: Option<XdpMap>,
}
#[derive(Clone, Copy)]
pub(crate) enum XdpMap {
CpuMap,
DevMap,
}
impl Xdp {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Xdp> {
let item = syn::parse2(item)?;
let mut args: Args = syn::parse2(attrs)?;
let frags = pop_bool_arg(&mut args, "frags");
let map = match pop_string_arg(&mut args, "map").as_deref() {
Some("cpumap") => Some(XdpMap::CpuMap),
Some("devmap") => Some(XdpMap::DevMap),
Some(name) => {
return Err(Error::new_spanned(
"map",
format!("Invalid value. Expected 'cpumap' or 'devmap', found '{name}'"),
))
}
None => None,
};
err_on_unknown_args(&args)?;
Ok(Xdp { item, frags })
Ok(Xdp { item, frags, map })
}
pub(crate) fn expand(&self) -> Result<TokenStream> {
let section_name: Cow<'_, _> = if self.frags {
"xdp.frags".into()
} else {
"xdp".into()
let mut section_name = vec![if self.frags { "xdp.frags" } else { "xdp" }];
match self.map {
Some(XdpMap::CpuMap) => section_name.push("cpumap"),
Some(XdpMap::DevMap) => section_name.push("devmap"),
None => (),
};
let section_name = section_name.join("/");
let fn_vis = &self.item.vis;
let fn_name = self.item.sig.ident.clone();
let item = &self.item;
@ -97,4 +118,122 @@ mod tests {
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_cpumap() {
let prog = Xdp::parse(
parse_quote! { map = "cpumap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/cpumap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_devmap() {
let prog = Xdp::parse(
parse_quote! { map = "devmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp/devmap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
#[should_panic(expected = "Invalid value. Expected 'cpumap' or 'devmap', found 'badmap'")]
fn test_xdp_bad_map() {
Xdp::parse(
parse_quote! { map = "badmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
}
#[test]
fn test_xdp_frags_cpumap() {
let prog = Xdp::parse(
parse_quote! { frags, map = "cpumap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/cpumap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
#[test]
fn test_xdp_frags_devmap() {
let prog = Xdp::parse(
parse_quote! { frags, map = "devmap" },
parse_quote! {
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
},
)
.unwrap();
let expanded = prog.expand().unwrap();
let expected = quote! {
#[no_mangle]
#[link_section = "xdp.frags/devmap"]
fn prog(ctx: *mut ::aya_bpf::bindings::xdp_md) -> u32 {
return prog(::aya_bpf::programs::XdpContext::new(ctx));
fn prog(ctx: &mut ::aya_bpf::programs::XdpContext) -> i32 {
0
}
}
};
assert_eq!(expected.to_string(), expanded.to_string());
}
}

@ -77,6 +77,12 @@ mod std {
pub use core_error::Error;
}
pub use core::*;
pub mod os {
pub mod fd {
pub type RawFd = core::ffi::c_int;
}
}
}
pub mod btf;

@ -176,6 +176,14 @@ impl Map {
}
}
/// Set the value size in bytes
pub fn set_value_size(&mut self, size: u32) {
match self {
Map::Legacy(m) => m.def.value_size = size,
Map::Btf(m) => m.def.value_size = size,
}
}
/// Returns the max entry number
pub fn max_entries(&self) -> u32 {
match self {

@ -19,6 +19,7 @@ use crate::{
btf::BtfFeatures,
generated::{BPF_CALL, BPF_JMP, BPF_K},
maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE},
programs::XdpAttachType,
relocation::*,
util::HashMap,
};
@ -47,17 +48,22 @@ pub struct Features {
bpf_perf_link: bool,
bpf_global_data: bool,
bpf_cookie: bool,
cpumap_prog_id: bool,
devmap_prog_id: bool,
btf: Option<BtfFeatures>,
}
impl Features {
#[doc(hidden)]
#[allow(clippy::too_many_arguments)]
pub fn new(
bpf_name: bool,
bpf_probe_read_kernel: bool,
bpf_perf_link: bool,
bpf_global_data: bool,
bpf_cookie: bool,
cpumap_prog_id: bool,
devmap_prog_id: bool,
btf: Option<BtfFeatures>,
) -> Self {
Self {
@ -66,6 +72,8 @@ impl Features {
bpf_perf_link,
bpf_global_data,
bpf_cookie,
cpumap_prog_id,
devmap_prog_id,
btf,
}
}
@ -95,6 +103,16 @@ impl Features {
self.bpf_cookie
}
/// Returns whether XDP CPU Maps support chained program IDs.
pub fn cpumap_prog_id(&self) -> bool {
self.cpumap_prog_id
}
/// Returns whether XDP Device Maps support chained program IDs.
pub fn devmap_prog_id(&self) -> bool {
self.devmap_prog_id
}
/// If BTF is supported, returns which BTF features are supported.
pub fn btf(&self) -> Option<&BtfFeatures> {
self.btf.as_ref()
@ -204,8 +222,6 @@ pub struct Function {
/// - `struct_ops+`
/// - `fmod_ret+`, `fmod_ret.s+`
/// - `iter+`, `iter.s+`
/// - `xdp.frags/cpumap`, `xdp/cpumap`
/// - `xdp.frags/devmap`, `xdp/devmap`
#[derive(Debug, Clone)]
#[allow(missing_docs)]
pub enum ProgramSection {
@ -221,6 +237,7 @@ pub enum ProgramSection {
SocketFilter,
Xdp {
frags: bool,
attach_type: XdpAttachType,
},
SkMsg,
SkSkbStreamParser,
@ -283,8 +300,19 @@ impl FromStr for ProgramSection {
"uprobe.s" => UProbe { sleepable: true },
"uretprobe" => URetProbe { sleepable: false },
"uretprobe.s" => URetProbe { sleepable: true },
"xdp" => Xdp { frags: false },
"xdp.frags" => Xdp { frags: true },
"xdp" | "xdp.frags" => Xdp {
frags: kind == "xdp.frags",
attach_type: match pieces.next() {
None => XdpAttachType::Interface,
Some("cpumap") => XdpAttachType::CpuMap,
Some("devmap") => XdpAttachType::DevMap,
Some(_) => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
})
}
},
},
"tp_btf" => BtfTracePoint,
"tracepoint" | "tp" => TracePoint,
"socket" => SocketFilter,
@ -2012,7 +2040,7 @@ mod tests {
assert_matches!(
obj.parse_section(fake_section(
BpfSectionKind::Program,
"xdp/foo",
"xdp",
bytes_of(&fake_ins()),
None
)),
@ -2035,7 +2063,7 @@ mod tests {
assert_matches!(
obj.parse_section(fake_section(
BpfSectionKind::Program,
"xdp.frags/foo",
"xdp.frags",
bytes_of(&fake_ins()),
None
)),

@ -3,7 +3,9 @@
pub mod cgroup_sock;
pub mod cgroup_sock_addr;
pub mod cgroup_sockopt;
pub mod xdp;
pub use cgroup_sock::CgroupSockAttachType;
pub use cgroup_sock_addr::CgroupSockAddrAttachType;
pub use cgroup_sockopt::CgroupSockoptAttachType;
pub use xdp::XdpAttachType;

@ -0,0 +1,24 @@
//! XDP programs.
use crate::generated::bpf_attach_type;
/// Defines where to attach an `XDP` program.
#[derive(Copy, Clone, Debug)]
pub enum XdpAttachType {
/// Attach to a network interface.
Interface,
/// Attach to a cpumap. Requires kernel 5.9 or later.
CpuMap,
/// Attach to a devmap. Requires kernel 5.8 or later.
DevMap,
}
impl From<XdpAttachType> for bpf_attach_type {
fn from(value: XdpAttachType) -> Self {
match value {
XdpAttachType::Interface => bpf_attach_type::BPF_XDP,
XdpAttachType::CpuMap => bpf_attach_type::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => bpf_attach_type::BPF_XDP_DEVMAP,
}
}
}
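
A minimal sketch of how this conversion is meant to be used; the surrounding `fn main` wrapper is only for illustration and is not part of the diff.

```rust
use aya_obj::{generated::bpf_attach_type, programs::XdpAttachType};

fn main() {
    // Programs destined for a cpumap entry load with the BPF_XDP_CPUMAP attach type.
    let ty: bpf_attach_type = XdpAttachType::CpuMap.into();
    assert!(matches!(ty, bpf_attach_type::BPF_XDP_CPUMAP));
}
```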

@ -105,7 +105,7 @@ pub(crate) struct Symbol {
impl Object {
/// Relocates the map references
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, i32, &'a Map)>>(
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, std::os::fd::RawFd, &'a Map)>>(
&mut self,
maps: I,
text_sections: &HashSet<usize>,
@ -178,8 +178,8 @@ impl Object {
fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
fun: &mut Function,
relocations: I,
maps_by_section: &HashMap<usize, (&str, i32, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, i32, &Map)>,
maps_by_section: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, std::os::fd::RawFd, &Map)>,
symbol_table: &HashMap<usize, Symbol>,
text_sections: &HashSet<usize>,
) -> Result<(), RelocationError> {

@ -12,6 +12,7 @@ edition = "2021"
rust-version = "1.66"
[dependencies]
assert_matches = { workspace = true }
async-io = { workspace = true, optional = true }
aya-obj = { workspace = true, features = ["std"] }
bitflags = { workspace = true }
@ -28,7 +29,6 @@ thiserror = { workspace = true }
tokio = { workspace = true, features = ["rt"], optional = true }
[dev-dependencies]
assert_matches = { workspace = true }
tempfile = { workspace = true }
[features]

@ -3,7 +3,7 @@ use std::{
collections::{HashMap, HashSet},
fs, io,
os::{
fd::{AsFd as _, OwnedFd},
fd::{AsFd as _, AsRawFd as _, OwnedFd},
raw::c_int,
},
path::{Path, PathBuf},
@ -36,12 +36,12 @@ use crate::{
SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp,
},
sys::{
bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_bpf_cookie_supported,
is_bpf_global_data_supported, is_btf_datasec_supported, is_btf_decl_tag_supported,
is_btf_enum64_supported, is_btf_float_supported, is_btf_func_global_supported,
is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_probe_read_kernel_supported, is_prog_name_supported, retry_with_verifier_logs,
SyscallError,
bpf_load_btf, is_bpf_cookie_supported, is_bpf_global_data_supported,
is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_enum64_supported,
is_btf_float_supported, is_btf_func_global_supported, is_btf_func_supported,
is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_probe_read_kernel_supported, is_prog_id_supported, is_prog_name_supported,
retry_with_verifier_logs,
},
util::{bytes_of, bytes_of_slice, possible_cpus, POSSIBLE_CPUS},
};
@ -94,6 +94,8 @@ fn detect_features() -> Features {
is_perf_link_supported(),
is_bpf_global_data_supported(),
is_bpf_cookie_supported(),
is_prog_id_supported(BPF_MAP_TYPE_CPUMAP),
is_prog_id_supported(BPF_MAP_TYPE_DEVMAP),
btf,
);
debug!("BPF Feature Detection: {:#?}", f);
@ -413,7 +415,10 @@ impl<'a> BpfLoader<'a> {
| ProgramSection::URetProbe { sleepable: _ }
| ProgramSection::TracePoint
| ProgramSection::SocketFilter
| ProgramSection::Xdp { frags: _ }
| ProgramSection::Xdp {
frags: _,
attach_type: _,
}
| ProgramSection::SkMsg
| ProgramSection::SkSkbStreamParser
| ProgramSection::SkSkbStreamVerdict
@ -474,6 +479,15 @@ impl<'a> BpfLoader<'a> {
}
}
}
match obj.map_type().try_into() {
Ok(BPF_MAP_TYPE_CPUMAP) => {
obj.set_value_size(if FEATURES.cpumap_prog_id() { 8 } else { 4 })
}
Ok(BPF_MAP_TYPE_DEVMAP | BPF_MAP_TYPE_DEVMAP_HASH) => {
obj.set_value_size(if FEATURES.devmap_prog_id() { 8 } else { 4 })
}
_ => (),
}
let btf_fd = btf_fd.as_deref().map(|fd| fd.as_fd());
let mut map = match obj.pinning() {
PinningType::None => MapData::create(obj, &name, btf_fd)?,
@ -482,23 +496,7 @@ impl<'a> BpfLoader<'a> {
MapData::create_pinned(path, obj, &name, btf_fd)?
}
};
let fd = map.fd;
if !map.obj.data().is_empty() && map.obj.section_kind() != BpfSectionKind::Bss {
bpf_map_update_elem_ptr(fd, &0 as *const _, map.obj.data_mut().as_mut_ptr(), 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
})
.map_err(MapError::from)?;
}
if map.obj.section_kind() == BpfSectionKind::Rodata {
bpf_map_freeze(fd)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_freeze",
io_error,
})
.map_err(MapError::from)?;
}
map.finalize()?;
maps.insert(name, map);
}
@ -510,7 +508,7 @@ impl<'a> BpfLoader<'a> {
obj.relocate_maps(
maps.iter()
.map(|(s, data)| (s.as_str(), data.fd, &data.obj)),
.map(|(s, data)| (s.as_str(), data.fd().as_fd().as_raw_fd(), data.obj())),
&text_sections,
)?;
obj.relocate_calls(&text_sections)?;
@ -573,13 +571,18 @@ impl<'a> BpfLoader<'a> {
ProgramSection::SocketFilter => Program::SocketFilter(SocketFilter {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
}),
ProgramSection::Xdp { frags, .. } => {
ProgramSection::Xdp {
frags, attach_type, ..
} => {
let mut data =
ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level);
if *frags {
data.flags = BPF_F_XDP_HAS_FRAGS;
}
Program::Xdp(Xdp { data })
Program::Xdp(Xdp {
data,
attach_type: *attach_type,
})
}
ProgramSection::SkMsg => Program::SkMsg(SkMsg {
data: ProgramData::new(prog_name, obj, btf_fd, *verifier_log_level),
@ -691,7 +694,7 @@ impl<'a> BpfLoader<'a> {
if !*allow_unsupported_maps {
maps.iter().try_for_each(|(_, x)| match x {
Map::Unsupported(map) => Err(BpfError::MapError(MapError::Unsupported {
map_type: map.obj.map_type(),
map_type: map.obj().map_type(),
})),
_ => Ok(()),
})?;
@ -705,7 +708,7 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
let name = data.0;
let map = data.1;
let map_type =
bpf_map_type::try_from(map.obj.map_type()).map_err(|e| MapError::InvalidMapType {
bpf_map_type::try_from(map.obj().map_type()).map_err(|e| MapError::InvalidMapType {
map_type: e.map_type,
})?;
let map = match map_type {
@ -724,6 +727,10 @@ fn parse_map(data: (String, MapData)) -> Result<(String, Map), BpfError> {
BPF_MAP_TYPE_STACK => Map::Stack(map),
BPF_MAP_TYPE_STACK_TRACE => Map::StackTraceMap(map),
BPF_MAP_TYPE_QUEUE => Map::Queue(map),
BPF_MAP_TYPE_CPUMAP => Map::CpuMap(map),
BPF_MAP_TYPE_DEVMAP => Map::DevMap(map),
BPF_MAP_TYPE_DEVMAP_HASH => Map::DevMapHash(map),
BPF_MAP_TYPE_XSKMAP => Map::XskMap(map),
m => {
warn!("The map {name} is of type {:#?} which is currently unsupported in Aya, use `allow_unsupported_maps()` to load it anyways", m);
Map::Unsupported(map)

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -61,7 +62,7 @@ impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> {
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let value =
bpf_map_lookup_elem(fd, index, flags).map_err(|(_, io_error)| SyscallError {
@ -88,7 +89,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Array<T, V> {
pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -80,7 +81,7 @@ impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
pub fn get(&self, index: &u32, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let value = bpf_map_lookup_elem_per_cpu(fd, index, flags).map_err(|(_, io_error)| {
SyscallError {
@ -108,7 +109,7 @@ impl<T: BorrowMut<MapData>, V: Pod> PerCpuArray<T, V> {
pub fn set(&mut self, index: u32, values: PerCpuValues<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
bpf_map_update_elem_per_cpu(fd, &index, &values, flags).map_err(|(_, io_error)| {
SyscallError {

@ -74,7 +74,7 @@ impl<T: BorrowMut<MapData>> ProgramArray<T> {
pub fn set(&mut self, index: u32, program: &ProgramFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd;
let fd = data.fd().as_fd();
let prog_fd = program.as_fd();
let prog_fd = prog_fd.as_raw_fd();
@ -94,7 +94,7 @@ impl<T: BorrowMut<MapData>> ProgramArray<T> {
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, *index)?;
let fd = self.inner.borrow_mut().fd;
let fd = data.fd().as_fd();
bpf_map_delete_elem(fd, index)
.map(|_| ())

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -52,7 +53,7 @@ impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
/// Query the existence of the element.
pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_lookup_elem_ptr::<u32, _>(fd, None, &mut value, flags)
.map_err(|(_, io_error)| SyscallError {
@ -67,7 +68,7 @@ impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> BloomFilter<T, V> {
/// Inserts a value into the map.
pub fn insert(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow_mut().fd;
let fd = self.inner.borrow_mut().fd().as_fd();
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_push_elem",
io_error,

@ -1,6 +1,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -52,7 +53,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Returns a copy of the value associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,

@ -4,6 +4,7 @@ use crate::{
sys::{bpf_map_delete_elem, bpf_map_update_elem, SyscallError},
Pod,
};
use std::os::fd::AsFd as _;
#[allow(clippy::module_inception)]
mod hash_map;
@ -20,7 +21,7 @@ pub(crate) fn insert<K: Pod, V: Pod>(
value: &V,
flags: u64,
) -> Result<(), MapError> {
let fd = map.fd;
let fd = map.fd().as_fd();
bpf_map_update_elem(fd, Some(key), value, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
@ -30,7 +31,7 @@ pub(crate) fn insert<K: Pod, V: Pod>(
}
pub(crate) fn remove<K: Pod>(map: &MapData, key: &K) -> Result<(), MapError> {
let fd = map.fd;
let fd = map.fd().as_fd();
bpf_map_delete_elem(fd, key)
.map(|_| ())
.map_err(|(_, io_error)| {

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -61,7 +62,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Returns a slice of values - one for each CPU - associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let values =
bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
@ -118,7 +119,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
values: PerCpuValues<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.borrow_mut().fd;
let fd = self.inner.borrow_mut().fd().as_fd();
bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err(
|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -126,7 +127,7 @@ impl<T: Borrow<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie.
pub fn get(&self, key: &Key<K>, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
@ -155,7 +156,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
value: impl Borrow<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",
@ -170,7 +171,7 @@ impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
///
/// Both the prefix and data must match exactly - this method does not do a longest prefix match.
pub fn remove(&mut self, key: &Key<K>) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_delete_elem(fd, key)
.map(|_| ())
.map_err(|(_, io_error)| {

@ -18,17 +18,28 @@
//! the [`TryFrom`] or [`TryInto`] trait. For example:
//!
//! ```no_run
//! # #[derive(Debug, thiserror::Error)]
//! # enum Error {
//! # #[error(transparent)]
//! # IO(#[from] std::io::Error),
//! # #[error(transparent)]
//! # Map(#[from] aya::maps::MapError),
//! # #[error(transparent)]
//! # Program(#[from] aya::programs::ProgramError),
//! # #[error(transparent)]
//! # Bpf(#[from] aya::BpfError)
//! # }
//! # let mut bpf = aya::Bpf::load(&[])?;
//! use aya::maps::SockMap;
//! use aya::programs::SkMsg;
//!
//! let intercept_egress = SockMap::try_from(bpf.map_mut("INTERCEPT_EGRESS").unwrap())?;
//! let map_fd = intercept_egress.fd()?;
//! let map_fd = intercept_egress.fd().try_clone()?;
//! let prog: &mut SkMsg = bpf.program_mut("intercept_egress_packet").unwrap().try_into()?;
//! prog.load()?;
//! prog.attach(map_fd)?;
//! prog.attach(&map_fd)?;
//!
//! # Ok::<(), aya::BpfError>(())
//! # Ok::<(), Error>(())
//! ```
//!
//! # Maps and `Pod` values
@ -42,22 +53,22 @@ use std::{
marker::PhantomData,
mem,
ops::Deref,
os::fd::{AsFd as _, AsRawFd, BorrowedFd, IntoRawFd as _, OwnedFd, RawFd},
os::fd::{AsFd, BorrowedFd, OwnedFd},
path::Path,
ptr,
};
use crate::util::KernelVersion;
use libc::{getrlimit, rlimit, RLIMIT_MEMLOCK, RLIM_INFINITY};
use libc::{getrlimit, rlim_t, rlimit, RLIMIT_MEMLOCK, RLIM_INFINITY};
use log::warn;
use thiserror::Error;
use crate::{
obj::{self, parse_map_info},
obj::{self, parse_map_info, BpfSectionKind},
pin::PinError,
sys::{
bpf_create_map, bpf_get_object, bpf_map_get_info_by_fd, bpf_map_get_next_key,
bpf_pin_object, SyscallError,
bpf_create_map, bpf_get_object, bpf_map_freeze, bpf_map_get_info_by_fd,
bpf_map_get_next_key, bpf_map_update_elem_ptr, bpf_pin_object, SyscallError,
},
util::nr_cpus,
PinningType, Pod,
@ -72,6 +83,7 @@ pub mod queue;
pub mod sock;
pub mod stack;
pub mod stack_trace;
pub mod xdp;
pub use array::{Array, PerCpuArray, ProgramArray};
pub use bloom_filter::BloomFilter;
@ -85,6 +97,7 @@ pub use queue::Queue;
pub use sock::{SockHash, SockMap};
pub use stack::Stack;
pub use stack_trace::StackTraceMap;
pub use xdp::{CpuMap, DevMap, DevMapHash, XskMap};
#[derive(Error, Debug)]
/// Errors occurring from working with Maps
@ -168,6 +181,10 @@ pub enum MapError {
error: PinError,
},
/// Program IDs are not supported
#[error("program ids are not supported by the current kernel")]
ProgIdNotSupported,
/// Unsupported Map type
#[error("Unsupported map type found {map_type}")]
Unsupported {
@ -177,25 +194,13 @@ pub enum MapError {
}
/// A map file descriptor.
pub struct MapFd(RawFd);
impl AsRawFd for MapFd {
fn as_raw_fd(&self) -> RawFd {
self.0
}
}
#[derive(Debug)]
pub struct MapFd(OwnedFd);
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct RlimitSize(usize);
impl fmt::Display for RlimitSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.0 < 1024 {
write!(f, "{} bytes", self.0)
} else if self.0 < 1024 * 1024 {
write!(f, "{} KiB", self.0 / 1024)
} else {
write!(f, "{} MiB", self.0 / 1024 / 1024)
}
impl AsFd for MapFd {
fn as_fd(&self) -> BorrowedFd<'_> {
let Self(fd) = self;
fd.as_fd()
}
}
@ -207,15 +212,28 @@ fn maybe_warn_rlimit() {
if ret == 0 {
let limit = unsafe { limit.assume_init() };
let limit: RlimitSize = RlimitSize(limit.rlim_cur.try_into().unwrap());
if limit.0 == RLIM_INFINITY.try_into().unwrap() {
if limit.rlim_cur == RLIM_INFINITY {
return;
}
struct HumanSize(rlim_t);
impl fmt::Display for HumanSize {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let &Self(size) = self;
if size < 1024 {
write!(f, "{} bytes", size)
} else if size < 1024 * 1024 {
write!(f, "{} KiB", size / 1024)
} else {
write!(f, "{} MiB", size / 1024 / 1024)
}
}
}
warn!(
"RLIMIT_MEMLOCK value is {}, not RLIM_INFNITY; if experiencing problems with creating \
maps, try raising RMILIT_MEMLOCK either to RLIM_INFINITY or to a higher value sufficient \
for size of your maps",
limit
"RLIMIT_MEMLOCK value is {}, not RLIM_INFINITY; if experiencing problems with creating \
maps, try raising RLIMIT_MEMLOCK either to RLIM_INFINITY or to a higher value sufficient \
for the size of your maps",
HumanSize(limit.rlim_cur)
);
}
}
@ -223,37 +241,45 @@ fn maybe_warn_rlimit() {
/// eBPF map types.
#[derive(Debug)]
pub enum Map {
/// A [`Array`] map
/// An [`Array`] map.
Array(MapData),
/// A [`PerCpuArray`] map
/// A [`PerCpuArray`] map.
PerCpuArray(MapData),
/// A [`ProgramArray`] map
/// A [`ProgramArray`] map.
ProgramArray(MapData),
/// A [`HashMap`] map
/// A [`HashMap`] map.
HashMap(MapData),
/// A [`PerCpuHashMap`] map
/// A [`PerCpuHashMap`] map.
PerCpuHashMap(MapData),
/// A [`HashMap`] map that uses a LRU eviction policy.
LruHashMap(MapData),
/// A [`PerCpuHashMap`] map that uses a LRU eviction policy.
PerCpuLruHashMap(MapData),
/// A [`PerfEventArray`] map
/// A [`PerfEventArray`] map.
PerfEventArray(MapData),
/// A [`SockMap`] map
/// A [`SockMap`] map.
SockMap(MapData),
/// A [`SockHash`] map
/// A [`SockHash`] map.
SockHash(MapData),
/// A [`BloomFilter`] map
/// A [`BloomFilter`] map.
BloomFilter(MapData),
/// A [`LpmTrie`] map
/// A [`LpmTrie`] map.
LpmTrie(MapData),
/// A [`Stack`] map
/// A [`Stack`] map.
Stack(MapData),
/// A [`StackTraceMap`] map
/// A [`StackTraceMap`] map.
StackTraceMap(MapData),
/// A [`Queue`] map
/// A [`Queue`] map.
Queue(MapData),
/// An unsupported map type
/// A [`CpuMap`] map.
CpuMap(MapData),
/// A [`DevMap`] map.
DevMap(MapData),
/// A [`DevMapHash`] map.
DevMapHash(MapData),
/// A [`XskMap`] map.
XskMap(MapData),
/// An unsupported map type.
Unsupported(MapData),
}
@ -276,6 +302,10 @@ impl Map {
Self::Stack(map) => map.obj.map_type(),
Self::StackTraceMap(map) => map.obj.map_type(),
Self::Queue(map) => map.obj.map_type(),
Self::CpuMap(map) => map.obj.map_type(),
Self::DevMap(map) => map.obj.map_type(),
Self::DevMapHash(map) => map.obj.map_type(),
Self::XskMap(map) => map.obj.map_type(),
Self::Unsupported(map) => map.obj.map_type(),
}
}
@ -335,6 +365,10 @@ impl_try_from_map!(() {
SockMap,
PerfEventArray,
StackTraceMap,
CpuMap,
DevMap,
DevMapHash,
XskMap,
});
#[cfg(any(feature = "async_tokio", feature = "async_std"))]
@ -395,10 +429,8 @@ pub(crate) fn check_v_size<V>(map: &MapData) -> Result<(), MapError> {
/// You should never need to use this unless you're implementing a new map type.
#[derive(Debug)]
pub struct MapData {
pub(crate) obj: obj::Map,
pub(crate) fd: RawFd,
/// Indicates if this map has been pinned to bpffs
pub pinned: bool,
obj: obj::Map,
fd: MapFd,
}
impl MapData {
@ -426,14 +458,8 @@ impl MapData {
io_error,
}
})?;
#[allow(trivial_numeric_casts)]
let fd = fd as RawFd;
Ok(Self {
obj,
fd,
pinned: false,
})
let fd = MapFd(fd);
Ok(Self { obj, fd })
}
pub(crate) fn create_pinned<P: AsRef<Path>>(
@ -459,14 +485,14 @@ impl MapData {
call: "BPF_OBJ_GET",
io_error,
}) {
Ok(fd) => Ok(Self {
obj,
fd: fd.into_raw_fd(),
pinned: false,
}),
Ok(fd) => {
let fd = MapFd(fd);
Ok(Self { obj, fd })
}
Err(_) => {
let mut map = Self::create(obj, name, btf_fd)?;
map.pin(name, path).map_err(|error| MapError::PinError {
let path = path.join(name);
map.pin(&path).map_err(|error| MapError::PinError {
name: Some(name.into()),
error,
})?;
@ -475,6 +501,27 @@ impl MapData {
}
}
pub(crate) fn finalize(&mut self) -> Result<(), MapError> {
let Self { obj, fd } = self;
if !obj.data().is_empty() && obj.section_kind() != BpfSectionKind::Bss {
bpf_map_update_elem_ptr(fd.as_fd(), &0 as *const _, obj.data_mut().as_mut_ptr(), 0)
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
})
.map_err(MapError::from)?;
}
if obj.section_kind() == BpfSectionKind::Rodata {
bpf_map_freeze(fd.as_fd())
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_freeze",
io_error,
})
.map_err(MapError::from)?;
}
Ok(())
}
/// Loads a map from a pinned path in bpffs.
pub fn from_pin<P: AsRef<Path>>(path: P) -> Result<Self, MapError> {
use std::os::unix::ffi::OsStrExt as _;
@ -493,13 +540,13 @@ impl MapData {
call: "BPF_OBJ_GET",
io_error,
})?;
let fd = MapFd(fd);
let info = bpf_map_get_info_by_fd(fd.as_fd())?;
Ok(Self {
obj: parse_map_info(info, PinningType::ByName),
fd: fd.into_raw_fd(),
pinned: true,
fd,
})
}
@ -511,56 +558,64 @@ impl MapData {
pub fn from_fd(fd: OwnedFd) -> Result<Self, MapError> {
let info = bpf_map_get_info_by_fd(fd.as_fd())?;
let fd = MapFd(fd);
Ok(Self {
obj: parse_map_info(info, PinningType::None),
fd: fd.into_raw_fd(),
pinned: false,
fd,
})
}
pub(crate) fn pin<P: AsRef<Path>>(&mut self, name: &str, path: P) -> Result<(), PinError> {
/// Allows the map to be pinned to the provided path.
///
/// Any directories in the path provided should have been created by the caller.
/// The path must be on a BPF filesystem.
///
/// # Errors
///
/// Returns a [`PinError::SyscallError`] if the underlying syscall fails.
/// This may also happen if the path already exists, in which case the wrapped
/// [`std::io::Error`] kind will be [`std::io::ErrorKind::AlreadyExists`].
/// Returns a [`PinError::InvalidPinPath`] if the path provided cannot be
/// converted to a [`CString`].
///
/// # Example
///
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// # use aya::maps::MapData;
///
/// let mut map = MapData::from_pin("/sys/fs/bpf/my_map")?;
/// map.pin("/sys/fs/bpf/my_map2")?;
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn pin<P: AsRef<Path>>(&mut self, path: P) -> Result<(), PinError> {
use std::os::unix::ffi::OsStrExt as _;
let Self { fd, pinned, obj: _ } = self;
if *pinned {
return Err(PinError::AlreadyPinned { name: name.into() });
}
let path = path.as_ref().join(name);
let path_string = CString::new(path.as_os_str().as_bytes())
.map_err(|error| PinError::InvalidPinPath { path, error })?;
bpf_pin_object(*fd, &path_string).map_err(|(_, io_error)| SyscallError {
let Self { fd, obj: _ } = self;
let path = path.as_ref();
let path_string = CString::new(path.as_os_str().as_bytes()).map_err(|error| {
PinError::InvalidPinPath {
path: path.to_path_buf(),
error,
}
})?;
bpf_pin_object(fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_PIN",
io_error,
})?;
*pinned = true;
Ok(())
}
/// Returns the file descriptor of the map.
///
/// Can be converted to [`RawFd`] using [`AsRawFd`].
pub fn fd(&self) -> MapFd {
MapFd(self.fd)
}
}
impl Drop for MapData {
fn drop(&mut self) {
// TODO: Replace this with an OwnedFd once that is stabilized.
//
// SAFETY: `drop` is only called once.
unsafe { libc::close(self.fd) };
pub fn fd(&self) -> &MapFd {
let Self { obj: _, fd } = self;
fd
}
}
impl Clone for MapData {
fn clone(&self) -> Self {
let Self { obj, fd, pinned } = self;
Self {
obj: obj.clone(),
fd: unsafe { libc::dup(*fd) },
pinned: *pinned,
}
pub(crate) fn obj(&self) -> &obj::Map {
let Self { obj, fd: _ } = self;
obj
}
}
@ -598,7 +653,7 @@ impl<K: Pod> Iterator for MapKeys<'_, K> {
return None;
}
let fd = self.map.fd;
let fd = self.map.fd().as_fd();
let key =
bpf_map_get_next_key(fd, self.key.as_ref()).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_get_next_key",
@ -754,6 +809,7 @@ impl<T: Pod> Deref for PerCpuValues<T> {
mod tests {
use assert_matches::assert_matches;
use libc::EFAULT;
use std::os::fd::AsRawFd as _;
use crate::{
bpf_map_def,
@ -795,9 +851,8 @@ mod tests {
MapData::create(new_obj_map(), "foo", None),
Ok(MapData {
obj: _,
fd: 42,
pinned: false
})
fd,
}) => assert_eq!(fd.as_fd().as_raw_fd(), 42)
);
}

@ -4,7 +4,7 @@
use std::{
borrow::{Borrow, BorrowMut},
ops::Deref,
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd as _, AsRawFd, RawFd},
sync::Arc,
};
@ -181,7 +181,7 @@ impl<T: BorrowMut<MapData>> PerfEventArray<T> {
// FIXME: keep track of open buffers
let map_data: &MapData = self.map.deref().borrow();
let map_fd = map_data.fd;
let map_fd = map_data.fd().as_fd();
let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?;
bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0)
.map_err(|(_, io_error)| io_error)?;

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -60,7 +61,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
/// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| SyscallError {
@ -77,7 +78,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_push_elem",
io_error,

@ -5,17 +5,29 @@ mod sock_map;
pub use sock_hash::SockHash;
pub use sock_map::SockMap;
use std::os::fd::{AsFd, BorrowedFd, RawFd};
use std::{
io,
os::fd::{AsFd, BorrowedFd},
};
/// A socket map file descriptor.
#[derive(Copy, Clone)]
pub struct SockMapFd(RawFd);
#[repr(transparent)]
pub struct SockMapFd(super::MapFd);
impl SockMapFd {
/// Creates a new instance that shares the same underlying file description as [`self`].
pub fn try_clone(&self) -> io::Result<Self> {
let Self(inner) = self;
let super::MapFd(inner) = inner;
let inner = inner.try_clone()?;
let inner = super::MapFd(inner);
Ok(Self(inner))
}
}
impl AsFd for SockMapFd {
fn as_fd(&self) -> BorrowedFd<'_> {
// SAFETY: This isn't necessarily safe, we need to find ways
// to enforce that the file descriptor is still
// valid. TODO(#612)
unsafe { BorrowedFd::borrow_raw(self.0) }
let Self(fd) = self;
fd.as_fd()
}
}

@ -1,12 +1,13 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd as _, AsRawFd, RawFd},
};
use crate::{
maps::{
check_kv_size, hash_map, sock::SockMapFd, IterableMap, MapData, MapError, MapIter, MapKeys,
check_kv_size, hash_map, sock::SockMapFd, IterableMap, MapData, MapError, MapFd, MapIter,
MapKeys,
},
sys::{bpf_map_lookup_elem, SyscallError},
Pod,
@ -47,11 +48,11 @@ use crate::{
/// use aya::programs::SkMsg;
///
/// let mut intercept_egress = SockHash::<_, u32>::try_from(bpf.map("INTERCEPT_EGRESS").unwrap())?;
/// let map_fd = intercept_egress.fd()?;
/// let map_fd = intercept_egress.fd().try_clone()?;
///
/// let prog: &mut SkMsg = bpf.program_mut("intercept_egress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// let mut client = TcpStream::connect("127.0.0.1:1234")?;
/// let mut intercept_egress = SockHash::try_from(bpf.map_mut("INTERCEPT_EGRESS").unwrap())?;
@ -81,7 +82,7 @@ impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
/// Returns the fd of the socket stored at the given key.
pub fn get(&self, key: &K, flags: u64) -> Result<RawFd, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
@ -105,8 +106,11 @@ impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
///
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.borrow().fd))
pub fn fd(&self) -> &SockMapFd {
let fd: &MapFd = self.inner.borrow().fd();
// TODO(https://github.com/rust-lang/rfcs/issues/3066): avoid this unsafe.
// SAFETY: `SockMapFd` is #[repr(transparent)] over `MapFd`.
unsafe { std::mem::transmute(fd) }
}
}

@ -2,11 +2,11 @@
use std::{
borrow::{Borrow, BorrowMut},
os::fd::{AsRawFd, RawFd},
os::fd::{AsFd as _, AsRawFd, RawFd},
};
use crate::{
maps::{check_bounds, check_kv_size, sock::SockMapFd, MapData, MapError, MapKeys},
maps::{check_bounds, check_kv_size, sock::SockMapFd, MapData, MapError, MapFd, MapKeys},
sys::{bpf_map_delete_elem, bpf_map_update_elem, SyscallError},
};
@ -26,18 +26,29 @@ use crate::{
/// # Examples
///
/// ```no_run
/// # #[derive(Debug, thiserror::Error)]
/// # enum Error {
/// # #[error(transparent)]
/// # IO(#[from] std::io::Error),
/// # #[error(transparent)]
/// # Map(#[from] aya::maps::MapError),
/// # #[error(transparent)]
/// # Program(#[from] aya::programs::ProgramError),
/// # #[error(transparent)]
/// # Bpf(#[from] aya::BpfError)
/// # }
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::SockMap;
/// use aya::programs::SkSkb;
///
/// let intercept_ingress = SockMap::try_from(bpf.map("INTERCEPT_INGRESS").unwrap())?;
/// let map_fd = intercept_ingress.fd()?;
/// let map_fd = intercept_ingress.fd().try_clone()?;
///
/// let prog: &mut SkSkb = bpf.program_mut("intercept_ingress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// # Ok::<(), aya::BpfError>(())
/// # Ok::<(), Error>(())
/// ```
#[doc(alias = "BPF_MAP_TYPE_SOCKMAP")]
pub struct SockMap<T> {
@ -62,8 +73,11 @@ impl<T: Borrow<MapData>> SockMap<T> {
///
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.borrow().fd))
pub fn fd(&self) -> &SockMapFd {
let fd: &MapFd = self.inner.borrow().fd();
// TODO(https://github.com/rust-lang/rfcs/issues/3066): avoid this unsafe.
// SAFETY: `SockMapFd` is #[repr(transparent)] over `MapFd`.
unsafe { std::mem::transmute(fd) }
}
}
@ -71,7 +85,7 @@ impl<T: BorrowMut<MapData>> SockMap<T> {
/// Stores a socket into the map.
pub fn set<I: AsRawFd>(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
let fd = data.fd;
let fd = data.fd().as_fd();
check_bounds(data, index)?;
bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err(
|(_, io_error)| SyscallError {
@ -85,7 +99,7 @@ impl<T: BorrowMut<MapData>> SockMap<T> {
/// Removes the socket stored at `index` from the map.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
let fd = data.fd;
let fd = data.fd().as_fd();
check_bounds(data, *index)?;
bpf_map_delete_elem(fd, index)
.map(|_| ())

@ -2,6 +2,7 @@
use std::{
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::fd::AsFd as _,
};
use crate::{
@ -60,7 +61,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
/// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| SyscallError {
@ -77,7 +78,7 @@ impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| {
SyscallError {
call: "bpf_map_update_elem",

@ -1,7 +1,7 @@
//! A hash map of kernel or user space stack traces.
//!
//! See [`StackTraceMap`] for documentation and examples.
use std::{borrow::Borrow, fs, io, mem, path::Path, str::FromStr};
use std::{borrow::Borrow, fs, io, mem, os::fd::AsFd as _, path::Path, str::FromStr};
use crate::{
maps::{IterableMap, MapData, MapError, MapIter, MapKeys},
@ -103,7 +103,7 @@ impl<T: Borrow<MapData>> StackTraceMap<T> {
/// Returns [`MapError::KeyNotFound`] if there is no stack trace with the
/// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, stack_id: &u32, flags: u64) -> Result<StackTrace, MapError> {
let fd = self.inner.borrow().fd;
let fd = self.inner.borrow().fd().as_fd();
let mut frames = vec![0; self.max_stack_depth];
bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags)

@ -0,0 +1,195 @@
//! An array of available CPUs.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_cpumap_val;
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, bpf_map_update_elem, SyscallError},
Pod, FEATURES,
};
use super::XdpMapError;
/// An array of available CPUs.
///
/// XDP programs can use this map to redirect packets to a target
/// CPU for processing.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.15.
///
/// # Examples
/// ```no_run
/// # let elf_bytes = &[];
/// use aya::maps::xdp::CpuMap;
///
/// let ncpus = aya::util::nr_cpus().unwrap() as u32;
/// let mut bpf = aya::BpfLoader::new()
/// .set_max_entries("CPUS", ncpus)
/// .load(elf_bytes)
/// .unwrap();
/// let mut cpumap = CpuMap::try_from(bpf.map_mut("CPUS").unwrap())?;
/// let flags = 0;
/// let queue_size = 2048;
/// for i in 0..ncpus {
/// cpumap.set(i, queue_size, None, flags);
/// }
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_cpumap.html>
#[doc(alias = "BPF_MAP_TYPE_CPUMAP")]
pub struct CpuMap<T> {
inner: T,
}
impl<T: Borrow<MapData>> CpuMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.cpumap_prog_id() {
check_kv_size::<u32, bpf_cpumap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
/// Returns the queue size and optional program for a given CPU index.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `cpu_index` is out of bounds,
/// [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, cpu_index: u32, flags: u64) -> Result<CpuMapValue, MapError> {
let data = self.inner.borrow();
check_bounds(data, cpu_index)?;
let fd = data.fd().as_fd();
let value = if FEATURES.cpumap_prog_id() {
bpf_map_lookup_elem::<_, bpf_cpumap_val>(fd, &cpu_index, flags).map(|value| {
value.map(|value| CpuMapValue {
queue_size: value.qsize,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6241
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &cpu_index, flags).map(|value| {
value.map(|qsize| CpuMapValue {
queue_size: qsize,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the map.
pub fn iter(&self) -> impl Iterator<Item = Result<CpuMapValue, MapError>> + '_ {
(0..self.len()).map(move |i| self.get(i, 0))
}
}
impl<T: BorrowMut<MapData>> CpuMap<T> {
/// Sets the queue size at the given CPU index, and optionally a chained program.
///
/// When sending the packet to the CPU at the given index, the kernel will queue up to
/// `queue_size` packets before dropping them.
///
/// Starting from Linux kernel 5.9, another XDP program can be passed in that will be run on the
/// target CPU, instead of the CPU that receives the packets. This allows minimal
/// computations to be performed on the CPUs that directly handle packets from a NIC's RX queues,
/// and possibly heavier ones on other, less busy CPUs.
///
/// The chained program must be loaded with the `BPF_XDP_CPUMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "cpumap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails, [`XdpMapError::ChainedProgramNotSupported`] if the kernel
/// does not support chained programs and one is provided.
pub fn set(
&mut self,
cpu_index: u32,
queue_size: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
let data = self.inner.borrow_mut();
check_bounds(data, cpu_index)?;
let fd = data.fd().as_fd();
let res = if FEATURES.cpumap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_cpumap_val>() };
value.qsize = queue_size;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/cpumap.c#L466
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
bpf_map_update_elem(fd, Some(&cpu_index), &value, flags)
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
bpf_map_update_elem(fd, Some(&cpu_index), &queue_size, flags)
};
res.map_err(|(_, io_error)| {
MapError::from(SyscallError {
call: "bpf_map_update_elem",
io_error,
})
})?;
Ok(())
}
}
impl<T: Borrow<MapData>> IterableMap<u32, CpuMapValue> for CpuMap<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<CpuMapValue, MapError> {
self.get(*key, 0)
}
}
unsafe impl Pod for bpf_cpumap_val {}
#[derive(Clone, Copy, Debug)]
/// The value of a CPU map.
pub struct CpuMapValue {
/// Size of the queue for the CPU.
pub queue_size: u32,
/// Chained XDP program ID.
pub prog_id: Option<NonZeroU32>,
}
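
To tie the kernel- and user-space halves together, here is a hedged sketch of configuring a cpumap with a chained program. The map name `CPUS`, the helper name `chain_on_all_cpus`, how `prog_fd` is obtained, and the `Box<dyn Error>` error handling are all assumptions for illustration, not part of this diff; chaining only takes effect when `FEATURES.cpumap_prog_id()` is true (kernel 5.9+).

```rust
use aya::{maps::xdp::CpuMap, programs::ProgramFd};

// Hedged sketch: `bpf` is an already-loaded aya::Bpf containing a cpumap named
// "CPUS", and `prog_fd` refers to a program loaded with the BPF_XDP_CPUMAP
// attach type (i.e. one compiled with `#[xdp(map = "cpumap")]`).
fn chain_on_all_cpus(
    bpf: &mut aya::Bpf,
    prog_fd: &ProgramFd,
) -> Result<(), Box<dyn std::error::Error>> {
    let ncpus = aya::util::nr_cpus()? as u32;
    let mut cpumap = CpuMap::try_from(bpf.map_mut("CPUS").unwrap())?;
    for cpu in 0..ncpus {
        // Queue up to 2048 packets per CPU and run the chained program on the
        // target CPU instead of the CPU that received the packet.
        cpumap.set(cpu, 2048, Some(prog_fd), 0)?;
    }
    Ok(())
}
```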

@ -0,0 +1,187 @@
//! An array of network devices.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_devmap_val;
use crate::{
maps::{check_bounds, check_kv_size, IterableMap, MapData, MapError},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, bpf_map_update_elem, SyscallError},
Pod, FEATURES,
};
use super::XdpMapError;
/// An array of network devices.
///
/// XDP programs can use this map to redirect to other network
/// devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.14.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::xdp::DevMap;
///
/// let mut devmap = DevMap::try_from(bpf.map_mut("IFACES").unwrap())?;
/// // Lookups at index 2 will redirect packets to interface with index 3 (e.g. eth1)
/// devmap.set(2, 3, None, 0);
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_devmap.html>
#[doc(alias = "BPF_MAP_TYPE_DEVMAP")]
pub struct DevMap<T> {
inner: T,
}
impl<T: Borrow<MapData>> DevMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.devmap_prog_id() {
check_kv_size::<u32, bpf_devmap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
/// Returns the target interface index and optional program at a given index.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: u32, flags: u64) -> Result<DevMapValue, MapError> {
let data = self.inner.borrow();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
let value = if FEATURES.devmap_prog_id() {
bpf_map_lookup_elem::<_, bpf_devmap_val>(fd, &index, flags).map(|value| {
value.map(|value| DevMapValue {
if_index: value.ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6228
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &index, flags).map(|value| {
value.map(|ifindex| DevMapValue {
if_index: ifindex,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the array.
pub fn iter(&self) -> impl Iterator<Item = Result<DevMapValue, MapError>> + '_ {
(0..self.len()).map(move |i| self.get(i, 0))
}
}
impl<T: BorrowMut<MapData>> DevMap<T> {
/// Sets the target interface index at index, and optionally a chained program.
///
/// When redirecting using `index`, packets will be transmitted by the interface with
/// `target_if_index`.
///
/// Starting from Linux kernel 5.8, another XDP program can be passed in that will be run before
/// actual transmission. It can be used to modify the packet before transmission with
/// NIC-specific data (MAC address update, checksum computations, etc.) or for other purposes.
///
/// The chained program must be loaded with the `BPF_XDP_DEVMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "devmap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails, [`MapError::ProgIdNotSupported`] if the kernel does not
/// support chained programs and one is provided.
pub fn set(
&mut self,
index: u32,
target_if_index: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
let res = if FEATURES.devmap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_devmap_val>() };
value.ifindex = target_if_index;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L866
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L918
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
bpf_map_update_elem(fd, Some(&index), &value, flags)
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
bpf_map_update_elem(fd, Some(&index), &target_if_index, flags)
};
res.map_err(|(_, io_error)| {
MapError::from(SyscallError {
call: "bpf_map_update_elem",
io_error,
})
})?;
Ok(())
}
}
impl<T: Borrow<MapData>> IterableMap<u32, DevMapValue> for DevMap<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<DevMapValue, MapError> {
self.get(*key, 0)
}
}
unsafe impl Pod for bpf_devmap_val {}
#[derive(Clone, Copy, Debug)]
/// The value of a device map.
pub struct DevMapValue {
/// Target interface index to redirect to.
pub if_index: u32,
/// Chained XDP program ID.
pub prog_id: Option<NonZeroU32>,
}

@ -0,0 +1,168 @@
//! A hashmap of network devices.
use std::{
borrow::{Borrow, BorrowMut},
num::NonZeroU32,
os::fd::{AsFd, AsRawFd},
};
use aya_obj::generated::bpf_devmap_val;
use crate::{
maps::{check_kv_size, hash_map, IterableMap, MapData, MapError, MapIter, MapKeys},
programs::ProgramFd,
sys::{bpf_map_lookup_elem, SyscallError},
FEATURES,
};
use super::{dev_map::DevMapValue, XdpMapError};
/// A hashmap of network devices.
///
/// XDP programs can use this map to redirect to other network
/// devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 5.4.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::xdp::DevMapHash;
///
/// let mut devmap = DevMapHash::try_from(bpf.map_mut("IFACES").unwrap())?;
/// // Lookups with key 2 will redirect packets to interface with index 3 (e.g. eth1)
/// devmap.insert(2, 3, None, 0);
///
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_devmap.html>
#[doc(alias = "BPF_MAP_TYPE_DEVMAP_HASH")]
pub struct DevMapHash<T> {
inner: T,
}
impl<T: Borrow<MapData>> DevMapHash<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
if FEATURES.devmap_prog_id() {
check_kv_size::<u32, bpf_devmap_val>(data)?;
} else {
check_kv_size::<u32, u32>(data)?;
}
Ok(Self { inner: map })
}
/// Returns the target interface index and optional program for a given key.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, key: u32, flags: u64) -> Result<DevMapValue, MapError> {
let fd = self.inner.borrow().fd().as_fd();
let value = if FEATURES.devmap_prog_id() {
bpf_map_lookup_elem::<_, bpf_devmap_val>(fd, &key, flags).map(|value| {
value.map(|value| DevMapValue {
if_index: value.ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/include/uapi/linux/bpf.h#L6228
prog_id: NonZeroU32::new(unsafe { value.bpf_prog.id }),
})
})
} else {
bpf_map_lookup_elem::<_, u32>(fd, &key, flags).map(|value| {
value.map(|ifindex| DevMapValue {
if_index: ifindex,
prog_id: None,
})
})
};
value
.map_err(|(_, io_error)| SyscallError {
call: "bpf_map_lookup_elem",
io_error,
})?
.ok_or(MapError::KeyNotFound)
}
/// An iterator over the elements of the devmap in arbitrary order.
pub fn iter(&self) -> MapIter<'_, u32, DevMapValue, Self> {
MapIter::new(self)
}
/// An iterator visiting all keys in arbitrary order.
pub fn keys(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.borrow())
}
}
impl<T: BorrowMut<MapData>> DevMapHash<T> {
/// Inserts an ifindex and optionally a chained program in the map.
///
/// When redirecting using `key`, packets will be transmitted by the interface with `ifindex`.
///
/// Starting from Linux kernel 5.8, another XDP program can be passed in that will be run before
/// actual transmission. It can be used to modify the packet before transmission with
/// NIC-specific data (MAC address update, checksum computations, etc.) or for other purposes.
///
/// The chained program must be loaded with the `BPF_XDP_DEVMAP` attach type. When using
/// `aya-ebpf`, that means XDP programs that specify the `map = "devmap"` argument. See the
/// kernel-space `aya_ebpf::xdp` for more information.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_update_elem` fails, or
/// [`XdpMapError::ChainedProgramNotSupported`] if the kernel does not support chained programs
/// and one is provided.
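///
/// # Examples
///
/// A sketch of inserting an entry with a chained program; the names `"IFACES"` and
/// `"redirect_chain"` are illustrative, and the chained program is assumed to be an XDP
/// program built with the `map = "devmap"` argument:
///
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::{maps::xdp::DevMapHash, programs::Xdp};
///
/// let mut devmap = DevMapHash::try_from(bpf.take_map("IFACES").unwrap())?;
///
/// // "redirect_chain" is an illustrative program name compiled with `map = "devmap"`.
/// let prog: &mut Xdp = bpf.program_mut("redirect_chain").unwrap().try_into().unwrap();
/// prog.load().unwrap();
///
/// // Packets redirected with key 2 go out on interface 3, after the chained program runs.
/// devmap.insert(2, 3, Some(prog.fd().unwrap()), 0);
///
/// # Ok::<(), aya::BpfError>(())
/// ```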
pub fn insert(
&mut self,
key: u32,
target_if_index: u32,
program: Option<&ProgramFd>,
flags: u64,
) -> Result<(), XdpMapError> {
if FEATURES.devmap_prog_id() {
let mut value = unsafe { std::mem::zeroed::<bpf_devmap_val>() };
value.ifindex = target_if_index;
// Default is valid as the kernel will only consider fd > 0:
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L866
// https://github.com/torvalds/linux/blob/2dde18cd1d8fac735875f2e4987f11817cc0bc2c/kernel/bpf/devmap.c#L918
value.bpf_prog.fd = program
.map(|prog| prog.as_fd().as_raw_fd())
.unwrap_or_default();
hash_map::insert(self.inner.borrow_mut(), &key, &value, flags)?;
} else {
if program.is_some() {
return Err(XdpMapError::ChainedProgramNotSupported);
}
hash_map::insert(self.inner.borrow_mut(), &key, &target_if_index, flags)?;
}
Ok(())
}
/// Removes a value from the map.
///
/// # Errors
///
/// Returns [`MapError::SyscallError`] if `bpf_map_delete_elem` fails.
pub fn remove(&mut self, key: u32) -> Result<(), MapError> {
hash_map::remove(self.inner.borrow_mut(), &key)
}
}
impl<T: Borrow<MapData>> IterableMap<u32, DevMapValue> for DevMapHash<T> {
fn map(&self) -> &MapData {
self.inner.borrow()
}
fn get(&self, key: &u32) -> Result<DevMapValue, MapError> {
self.get(*key, 0)
}
}

@ -0,0 +1,25 @@
//! XDP maps.
mod cpu_map;
mod dev_map;
mod dev_map_hash;
mod xsk_map;
pub use cpu_map::CpuMap;
pub use dev_map::DevMap;
pub use dev_map_hash::DevMapHash;
pub use xsk_map::XskMap;
use super::MapError;
use thiserror::Error;
#[derive(Error, Debug)]
/// Errors occurring when working with XDP maps.
pub enum XdpMapError {
/// Chained programs are not supported.
#[error("chained programs are not supported by the current kernel")]
ChainedProgramNotSupported,
/// Map operation failed.
#[error(transparent)]
MapError(#[from] MapError),
}

@ -0,0 +1,81 @@
//! An array of AF_XDP sockets.
use std::{
borrow::{Borrow, BorrowMut},
os::fd::{AsFd, AsRawFd, RawFd},
};
use crate::{
maps::{check_bounds, check_kv_size, MapData, MapError},
sys::{bpf_map_update_elem, SyscallError},
};
/// An array of AF_XDP sockets.
///
/// XDP programs can use this map to redirect packets to a target
/// AF_XDP socket using the `XDP_REDIRECT` action.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.18.
///
/// # Examples
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// # let socket_fd = 1;
/// use aya::maps::XskMap;
///
/// let mut xskmap = XskMap::try_from(bpf.map_mut("SOCKETS").unwrap())?;
/// // socket_fd is the RawFd of an AF_XDP socket
/// xskmap.set(0, socket_fd, 0);
/// # Ok::<(), aya::BpfError>(())
/// ```
///
/// # See also
///
/// Kernel documentation: <https://docs.kernel.org/next/bpf/map_xskmap.html>
#[doc(alias = "BPF_MAP_TYPE_XSKMAP")]
pub struct XskMap<T> {
inner: T,
}
impl<T: Borrow<MapData>> XskMap<T> {
pub(crate) fn new(map: T) -> Result<Self, MapError> {
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
Ok(Self { inner: map })
}
/// Returns the number of elements in the array.
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.borrow().obj.max_entries()
}
}
impl<T: BorrowMut<MapData>> XskMap<T> {
/// Sets the `AF_XDP` socket at a given index.
///
/// When redirecting a packet, the `AF_XDP` socket at `index` will receive the packet. Note
/// that it will do so only if the socket is bound to the same queue the packet was received
/// on.
///
/// # Errors
///
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, socket_fd: impl AsRawFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd().as_fd();
bpf_map_update_elem(fd, Some(&index), &socket_fd.as_raw_fd(), flags).map_err(
|(_, io_error)| SyscallError {
call: "bpf_map_update_elem",
io_error,
},
)?;
Ok(())
}
}

@ -6,12 +6,6 @@ use thiserror::Error;
/// An error occurred while working with a pinned BPF object.
#[derive(Error, Debug)]
pub enum PinError {
/// The object has already been pinned.
#[error("the BPF object `{name}` has already been pinned")]
AlreadyPinned {
/// Object name.
name: String,
},
/// The object FD is not known by Aya.
#[error("the BPF object `{name}`'s FD is not known")]
NoFd {

@ -154,11 +154,9 @@ impl FdLink {
error,
}
})?;
bpf_pin_object(self.fd.as_raw_fd(), &path_string).map_err(|(_, io_error)| {
SyscallError {
call: "BPF_OBJ_PIN",
io_error,
}
bpf_pin_object(self.fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_PIN",
io_error,
})?;
Ok(PinnedLink::new(path.into(), self))
}

@ -218,9 +218,8 @@ pub enum ProgramError {
pub struct ProgramFd(OwnedFd);
impl ProgramFd {
/// Creates a new `ProgramFd` instance that shares the same underlying file
/// description as the existing `ProgramFd` instance.
pub fn try_clone(&self) -> Result<Self, ProgramError> {
/// Creates a new instance that shares the same underlying file description as [`self`].
pub fn try_clone(&self) -> io::Result<Self> {
let Self(inner) = self;
let inner = inner.try_clone()?;
Ok(Self(inner))
@ -410,6 +409,39 @@ impl Program {
Self::CgroupDevice(p) => p.fd(),
}
}
/// Returns information about a loaded program with the [`ProgramInfo`] structure.
///
/// This information is populated at load time by the kernel and can be used
/// to get kernel details for a given [`Program`].
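///
/// # Examples
///
/// A minimal sketch; the program name `"pass"` is illustrative:
///
/// ```no_run
/// # let mut bpf = aya::Bpf::load(&[])?;
/// // "pass" is an illustrative program name defined in your eBPF object file.
/// let prog = bpf.program("pass").unwrap();
/// let info = prog.info()?;
/// println!("loaded at: {:?}", info.loaded_at());
///
/// # Ok::<(), aya::BpfError>(())
/// ```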
pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
match self {
Self::KProbe(p) => p.info(),
Self::UProbe(p) => p.info(),
Self::TracePoint(p) => p.info(),
Self::SocketFilter(p) => p.info(),
Self::Xdp(p) => p.info(),
Self::SkMsg(p) => p.info(),
Self::SkSkb(p) => p.info(),
Self::SockOps(p) => p.info(),
Self::SchedClassifier(p) => p.info(),
Self::CgroupSkb(p) => p.info(),
Self::CgroupSysctl(p) => p.info(),
Self::CgroupSockopt(p) => p.info(),
Self::LircMode2(p) => p.info(),
Self::PerfEvent(p) => p.info(),
Self::RawTracePoint(p) => p.info(),
Self::Lsm(p) => p.info(),
Self::BtfTracePoint(p) => p.info(),
Self::FEntry(p) => p.info(),
Self::FExit(p) => p.info(),
Self::Extension(p) => p.info(),
Self::CgroupSockAddr(p) => p.info(),
Self::SkLookup(p) => p.info(),
Self::CgroupSock(p) => p.info(),
Self::CgroupDevice(p) => p.info(),
}
}
}
#[derive(Debug)]
@ -536,7 +568,7 @@ fn pin_program<T: Link, P: AsRef<Path>>(data: &ProgramData<T>, path: P) -> Resul
path: path.into(),
error,
})?;
bpf_pin_object(fd.as_fd().as_raw_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
bpf_pin_object(fd.as_fd(), &path_string).map_err(|(_, io_error)| SyscallError {
call: "BPF_OBJ_PIN",
io_error,
})?;
@ -847,7 +879,6 @@ macro_rules! impl_from_pin {
impl_from_pin!(
TracePoint,
SocketFilter,
Xdp,
SkMsg,
CgroupSysctl,
LircMode2,
@ -923,12 +954,12 @@ impl_try_from_program!(
/// This information is populated at load time by the kernel and can be used
/// to correlate a given [`Program`] to its corresponding [`ProgramInfo`]
/// metadata.
macro_rules! impl_program_info {
macro_rules! impl_info {
($($struct_name:ident),+ $(,)?) => {
$(
impl $struct_name {
/// Returns information about this program.
pub fn program_info(&self) -> Result<ProgramInfo, ProgramError> {
pub fn info(&self) -> Result<ProgramInfo, ProgramError> {
let ProgramFd(fd) = self.fd()?;
ProgramInfo::new_from_fd(fd.as_fd())
@ -938,7 +969,7 @@ macro_rules! impl_program_info {
}
}
impl_program_info!(
impl_info!(
KProbe,
UProbe,
TracePoint,

@ -43,11 +43,11 @@ use crate::{
/// use aya::programs::SkMsg;
///
/// let intercept_egress: SockHash<_, u32> = bpf.map("INTERCEPT_EGRESS").unwrap().try_into()?;
/// let map_fd = intercept_egress.fd()?;
/// let map_fd = intercept_egress.fd().try_clone()?;
///
/// let prog: &mut SkMsg = bpf.program_mut("intercept_egress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// let mut client = TcpStream::connect("127.0.0.1:1234")?;
/// let mut intercept_egress: SockHash<_, u32> = bpf.map_mut("INTERCEPT_EGRESS").unwrap().try_into()?;
@ -77,7 +77,7 @@ impl SkMsg {
/// Attaches the program to the given sockmap.
///
/// The returned value can be used to detach, see [SkMsg::detach].
pub fn attach(&mut self, map: SockMapFd) -> Result<SkMsgLinkId, ProgramError> {
pub fn attach(&mut self, map: &SockMapFd) -> Result<SkMsgLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();
let link = ProgAttachLink::attach(prog_fd, map.as_fd(), BPF_SK_MSG_VERDICT)?;

@ -37,18 +37,29 @@ pub enum SkSkbKind {
/// # Examples
///
/// ```no_run
/// # #[derive(Debug, thiserror::Error)]
/// # enum Error {
/// # #[error(transparent)]
/// # IO(#[from] std::io::Error),
/// # #[error(transparent)]
/// # Map(#[from] aya::maps::MapError),
/// # #[error(transparent)]
/// # Program(#[from] aya::programs::ProgramError),
/// # #[error(transparent)]
/// # Bpf(#[from] aya::BpfError)
/// # }
/// # let mut bpf = aya::Bpf::load(&[])?;
/// use aya::maps::SockMap;
/// use aya::programs::SkSkb;
///
/// let intercept_ingress: SockMap<_> = bpf.map("INTERCEPT_INGRESS").unwrap().try_into()?;
/// let map_fd = intercept_ingress.fd()?;
/// let map_fd = intercept_ingress.fd().try_clone()?;
///
/// let prog: &mut SkSkb = bpf.program_mut("intercept_ingress_packet").unwrap().try_into()?;
/// prog.load()?;
/// prog.attach(map_fd)?;
/// prog.attach(&map_fd)?;
///
/// # Ok::<(), aya::BpfError>(())
/// # Ok::<(), Error>(())
/// ```
///
/// [socket maps]: crate::maps::sock
@ -70,7 +81,7 @@ impl SkSkb {
/// Attaches the program to the given socket map.
///
/// The returned value can be used to detach, see [SkSkb::detach].
pub fn attach(&mut self, map: SockMapFd) -> Result<SkSkbLinkId, ProgramError> {
pub fn attach(&mut self, map: &SockMapFd) -> Result<SkSkbLinkId, ProgramError> {
let prog_fd = self.fd()?;
let prog_fd = prog_fd.as_fd();

@ -74,7 +74,7 @@ pub(crate) fn boot_time() -> SystemTime {
};
let since_boot = get_time(libc::CLOCK_BOOTTIME);
let since_epoch = get_time(libc::CLOCK_REALTIME);
UNIX_EPOCH + since_boot - since_epoch
UNIX_EPOCH + since_epoch - since_boot
}
/// Get the specified information from a file descriptor's fdinfo.
@ -82,7 +82,7 @@ pub(crate) fn get_fdinfo(fd: BorrowedFd<'_>, key: &str) -> Result<u32, ProgramEr
let info = File::open(format!("/proc/self/fdinfo/{}", fd.as_raw_fd()))?;
let reader = BufReader::new(info);
for line in reader.lines() {
let line = line.map_err(ProgramError::IOError)?;
let line = line?;
if !line.contains(key) {
continue;
}

@ -12,18 +12,21 @@ use std::{
hash::Hash,
io,
os::fd::{AsFd as _, AsRawFd as _, BorrowedFd, RawFd},
path::Path,
};
use thiserror::Error;
use crate::{
generated::{
bpf_attach_type, bpf_link_type, bpf_prog_type, XDP_FLAGS_DRV_MODE, XDP_FLAGS_HW_MODE,
XDP_FLAGS_REPLACE, XDP_FLAGS_SKB_MODE, XDP_FLAGS_UPDATE_IF_NOEXIST,
bpf_link_type, bpf_prog_type, XDP_FLAGS_DRV_MODE, XDP_FLAGS_HW_MODE, XDP_FLAGS_REPLACE,
XDP_FLAGS_SKB_MODE, XDP_FLAGS_UPDATE_IF_NOEXIST,
},
obj::programs::XdpAttachType,
programs::{
define_link_wrapper, load_program, FdLink, Link, LinkError, ProgramData, ProgramError,
},
sys::{bpf_link_create, bpf_link_get_info_by_fd, bpf_link_update, netlink_set_xdp_fd},
VerifierLogLevel,
};
/// The type returned when attaching an [`Xdp`] program fails on kernels `< 5.9`.
@ -80,12 +83,13 @@ bitflags::bitflags! {
#[doc(alias = "BPF_PROG_TYPE_XDP")]
pub struct Xdp {
pub(crate) data: ProgramData<XdpLink>,
pub(crate) attach_type: XdpAttachType,
}
impl Xdp {
/// Loads the program inside the kernel.
pub fn load(&mut self) -> Result<(), ProgramError> {
self.data.expected_attach_type = Some(bpf_attach_type::BPF_XDP);
self.data.expected_attach_type = Some(self.attach_type.into());
load_program(bpf_prog_type::BPF_PROG_TYPE_XDP, &mut self.data)
}
@ -133,10 +137,18 @@ impl Xdp {
let prog_fd = prog_fd.as_fd();
if KernelVersion::current().unwrap() >= KernelVersion::new(5, 9, 0) {
// Unwrap safety: the function starts with `self.fd()?` that will succeed if and only
// if the program has been loaded, i.e. there is an fd. We get one by:
// - Using `Xdp::from_pin` that sets `expected_attach_type`
// - Calling `Xdp::load` that sets `expected_attach_type`, as getting an `Xdp`
// instance through `Xdp::try_from(Program)` does not set any fd.
// So, in all cases where we have an fd, we have an expected_attach_type. Thus, if we
// reach this point, expected_attach_type is guaranteed to be Some(_).
let attach_type = self.data.expected_attach_type.unwrap();
let link_fd = bpf_link_create(
prog_fd,
LinkTarget::IfIndex(if_index),
bpf_attach_type::BPF_XDP,
attach_type,
None,
flags.bits(),
)
@ -163,6 +175,21 @@ impl Xdp {
}
}
/// Creates a program from a pinned entry on a bpffs.
///
/// Existing links will not be populated. To work with existing links you should use [`crate::programs::links::PinnedLink`].
///
/// On drop, any managed links are detached and the program is unloaded. This will not result in
/// the program being unloaded from the kernel if it is still pinned.
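///
/// # Examples
///
/// A minimal sketch; the pin path is illustrative:
///
/// ```no_run
/// use aya::programs::{Xdp, XdpFlags};
/// use aya_obj::programs::XdpAttachType;
///
/// // The pin path is illustrative; it must point to a previously pinned XDP program.
/// let mut prog = Xdp::from_pin("/sys/fs/bpf/my-pinned-xdp", XdpAttachType::Interface)?;
/// prog.attach("lo", XdpFlags::default())?;
///
/// # Ok::<(), aya::programs::ProgramError>(())
/// ```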
pub fn from_pin<P: AsRef<Path>>(
path: P,
attach_type: XdpAttachType,
) -> Result<Self, ProgramError> {
let mut data = ProgramData::from_pinned_path(path, VerifierLogLevel::default())?;
data.expected_attach_type = Some(attach_type.into());
Ok(Self { data, attach_type })
}
/// Detaches the program.
///
/// See [Xdp::attach].

@ -8,6 +8,7 @@ use std::{
};
use crate::util::KernelVersion;
use assert_matches::assert_matches;
use libc::{c_char, c_long, ENOENT, ENOSPC};
use obj::{
btf::{BtfEnum64, Enum64},
@ -38,7 +39,7 @@ pub(crate) fn bpf_create_map(
def: &obj::Map,
btf_fd: Option<BorrowedFd<'_>>,
kernel_version: KernelVersion,
) -> SysResult<c_long> {
) -> SysResult<OwnedFd> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_1 };
@ -91,13 +92,14 @@ pub(crate) fn bpf_create_map(
.copy_from_slice(unsafe { slice::from_raw_parts(name.as_ptr(), name_len) });
}
sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr)
// SAFETY: BPF_MAP_CREATE returns a new file descriptor.
unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) }
}
pub(crate) fn bpf_pin_object(fd: RawFd, path: &CStr) -> SysResult<c_long> {
pub(crate) fn bpf_pin_object(fd: BorrowedFd<'_>, path: &CStr) -> SysResult<c_long> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_4 };
u.bpf_fd = fd as u32;
u.bpf_fd = fd.as_raw_fd() as u32;
u.pathname = path.as_ptr() as u64;
sys_bpf(bpf_cmd::BPF_OBJ_PIN, &mut attr)
}
@ -194,7 +196,7 @@ pub(crate) fn bpf_load_program(
}
fn lookup<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: Option<&K>,
flags: u64,
cmd: bpf_cmd,
@ -203,7 +205,7 @@ fn lookup<K: Pod, V: Pod>(
let mut value = MaybeUninit::zeroed();
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
}
@ -218,7 +220,7 @@ fn lookup<K: Pod, V: Pod>(
}
pub(crate) fn bpf_map_lookup_elem<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: &K,
flags: u64,
) -> Result<Option<V>, (c_long, io::Error)> {
@ -226,7 +228,7 @@ pub(crate) fn bpf_map_lookup_elem<K: Pod, V: Pod>(
}
pub(crate) fn bpf_map_lookup_and_delete_elem<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: Option<&K>,
flags: u64,
) -> Result<Option<V>, (c_long, io::Error)> {
@ -234,7 +236,7 @@ pub(crate) fn bpf_map_lookup_and_delete_elem<K: Pod, V: Pod>(
}
pub(crate) fn bpf_map_lookup_elem_per_cpu<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: &K,
flags: u64,
) -> Result<Option<PerCpuValues<V>>, (c_long, io::Error)> {
@ -247,7 +249,7 @@ pub(crate) fn bpf_map_lookup_elem_per_cpu<K: Pod, V: Pod>(
}
pub(crate) fn bpf_map_lookup_elem_ptr<K: Pod, V>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: Option<&K>,
value: *mut V,
flags: u64,
@ -255,7 +257,7 @@ pub(crate) fn bpf_map_lookup_elem_ptr<K: Pod, V>(
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
}
@ -270,7 +272,7 @@ pub(crate) fn bpf_map_lookup_elem_ptr<K: Pod, V>(
}
pub(crate) fn bpf_map_update_elem<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: Option<&K>,
value: &V,
flags: u64,
@ -278,7 +280,7 @@ pub(crate) fn bpf_map_update_elem<K: Pod, V: Pod>(
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
}
@ -288,11 +290,15 @@ pub(crate) fn bpf_map_update_elem<K: Pod, V: Pod>(
sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &mut attr)
}
pub(crate) fn bpf_map_push_elem<V: Pod>(fd: RawFd, value: &V, flags: u64) -> SysResult<c_long> {
pub(crate) fn bpf_map_push_elem<V: Pod>(
fd: BorrowedFd<'_>,
value: &V,
flags: u64,
) -> SysResult<c_long> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
u.__bindgen_anon_1.value = value as *const _ as u64;
u.flags = flags;
@ -300,7 +306,7 @@ pub(crate) fn bpf_map_push_elem<V: Pod>(fd: RawFd, value: &V, flags: u64) -> Sys
}
pub(crate) fn bpf_map_update_elem_ptr<K, V>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: *const K,
value: *mut V,
flags: u64,
@ -308,7 +314,7 @@ pub(crate) fn bpf_map_update_elem_ptr<K, V>(
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
u.key = key as u64;
u.__bindgen_anon_1.value = value as u64;
u.flags = flags;
@ -317,7 +323,7 @@ pub(crate) fn bpf_map_update_elem_ptr<K, V>(
}
pub(crate) fn bpf_map_update_elem_per_cpu<K: Pod, V: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: &K,
values: &PerCpuValues<V>,
flags: u64,
@ -326,25 +332,25 @@ pub(crate) fn bpf_map_update_elem_per_cpu<K: Pod, V: Pod>(
bpf_map_update_elem_ptr(fd, key, mem.as_mut_ptr(), flags)
}
pub(crate) fn bpf_map_delete_elem<K: Pod>(fd: RawFd, key: &K) -> SysResult<c_long> {
pub(crate) fn bpf_map_delete_elem<K: Pod>(fd: BorrowedFd<'_>, key: &K) -> SysResult<c_long> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
u.key = key as *const _ as u64;
sys_bpf(bpf_cmd::BPF_MAP_DELETE_ELEM, &mut attr)
}
pub(crate) fn bpf_map_get_next_key<K: Pod>(
fd: RawFd,
fd: BorrowedFd<'_>,
key: Option<&K>,
) -> Result<Option<K>, (c_long, io::Error)> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let mut next_key = MaybeUninit::uninit();
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
}
@ -358,10 +364,10 @@ pub(crate) fn bpf_map_get_next_key<K: Pod>(
}
// since kernel 5.2
pub(crate) fn bpf_map_freeze(fd: RawFd) -> SysResult<c_long> {
pub(crate) fn bpf_map_freeze(fd: BorrowedFd<'_>) -> SysResult<c_long> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd as u32;
u.map_fd = fd.as_raw_fd() as u32;
sys_bpf(bpf_cmd::BPF_MAP_FREEZE, &mut attr)
}
@ -754,7 +760,7 @@ pub(crate) fn is_bpf_global_data_supported() -> bool {
);
if let Ok(map) = map {
insns[0].imm = map.fd;
insns[0].imm = map.fd().as_fd().as_raw_fd();
let gpl = b"GPL\0";
u.license = gpl.as_ptr() as u64;
@ -788,6 +794,28 @@ pub(crate) fn is_bpf_cookie_supported() -> bool {
bpf_prog_load(&mut attr).is_ok()
}
/// Tests whether CpuMap, DevMap and DevMapHash support program ids
pub(crate) fn is_prog_id_supported(map_type: bpf_map_type) -> bool {
assert_matches!(
map_type,
bpf_map_type::BPF_MAP_TYPE_CPUMAP
| bpf_map_type::BPF_MAP_TYPE_DEVMAP
| bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH
);
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_1 };
u.map_type = map_type as u32;
u.key_size = 4;
u.value_size = 8; // 4 for CPU ID, 8 for CPU ID + prog ID
u.max_entries = 1;
u.map_flags = 0;
// SAFETY: BPF_MAP_CREATE returns a new file descriptor.
unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) }.is_ok()
}
pub(crate) fn is_btf_supported() -> bool {
let mut btf = Btf::new();
let name_offset = btf.add_string("int");
@ -1067,4 +1095,28 @@ mod tests {
let supported = is_perf_link_supported();
assert!(!supported);
}
#[test]
fn test_prog_id_supported() {
override_syscall(|_call| Ok(42));
// Ensure that the three map types we can check are accepted
let supported = is_prog_id_supported(bpf_map_type::BPF_MAP_TYPE_CPUMAP);
assert!(supported);
let supported = is_prog_id_supported(bpf_map_type::BPF_MAP_TYPE_DEVMAP);
assert!(supported);
let supported = is_prog_id_supported(bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH);
assert!(supported);
override_syscall(|_call| Err((-1, io::Error::from_raw_os_error(EINVAL))));
let supported = is_prog_id_supported(bpf_map_type::BPF_MAP_TYPE_CPUMAP);
assert!(!supported);
}
#[test]
#[should_panic = "assertion failed: `BPF_MAP_TYPE_HASH` does not match `bpf_map_type::BPF_MAP_TYPE_CPUMAP | bpf_map_type::BPF_MAP_TYPE_DEVMAP | bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH`"]
fn test_prog_id_supported_reject_types() {
is_prog_id_supported(bpf_map_type::BPF_MAP_TYPE_HASH);
}
}

@ -17,6 +17,7 @@ pub mod sock_hash;
pub mod sock_map;
pub mod stack;
pub mod stack_trace;
pub mod xdp;
pub use array::Array;
pub use bloom_filter::BloomFilter;
@ -30,3 +31,4 @@ pub use sock_hash::SockHash;
pub use sock_map::SockMap;
pub use stack::Stack;
pub use stack_trace::StackTrace;
pub use xdp::{CpuMap, DevMap, DevMapHash, XskMap};

@ -0,0 +1,120 @@
use core::{cell::UnsafeCell, mem};
use aya_bpf_bindings::bindings::bpf_cpumap_val;
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_CPUMAP},
maps::PinningType,
};
use super::try_redirect_map;
/// An array of available CPUs.
///
/// XDP programs can use this map to redirect packets to a target CPU for processing.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.15.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::CpuMap, programs::XdpContext};
///
/// #[map]
/// static MAP: CpuMap = CpuMap::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// // Redirect to CPU 7 or drop packet if no entry found.
/// MAP.redirect(7, xdp_action::XDP_DROP as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[repr(transparent)]
pub struct CpuMap {
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for CpuMap {}
impl CpuMap {
/// Creates a [`CpuMap`] with a set maximum number of elements.
///
/// In a CPU map, an entry represents a CPU core. Thus there should be as many entries as there
/// are CPU cores on the system. `max_entries` can be set to zero here, and updated by userspace
/// at runtime. Refer to the userspace documentation for more information.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::CpuMap};
///
/// #[map]
/// static MAP: CpuMap = CpuMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> CpuMap {
CpuMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_CPUMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_cpumap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
}),
}
}
/// Creates a [`CpuMap`] with a set maximum number of elements that can be pinned to the BPF
/// File System (bpffs).
///
/// See [`CpuMap::with_max_entries`] for more information.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::CpuMap};
///
/// #[map]
/// static MAP: CpuMap = CpuMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> CpuMap {
CpuMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_CPUMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_cpumap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
}),
}
}
/// Redirects the current packet to the CPU at `index`.
///
/// The lower two bits of `flags` are used for the return code if the map lookup fails, which
/// can be used as the XDP program's return code if a CPU cannot be found.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::CpuMap, programs::XdpContext};
///
/// #[map]
/// static MAP: CpuMap = CpuMap::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// // Redirect to CPU 7 or drop packet if no entry found.
/// MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[inline(always)]
pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
try_redirect_map(&self.def, index, flags)
}
}

@ -0,0 +1,155 @@
use core::{cell::UnsafeCell, mem, num::NonZeroU32, ptr::NonNull};
use aya_bpf_bindings::bindings::bpf_devmap_val;
use aya_bpf_cty::c_void;
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_DEVMAP},
helpers::bpf_map_lookup_elem,
maps::PinningType,
};
use super::try_redirect_map;
/// An array of network devices.
///
/// XDP programs can use this map to redirect packets to other network devices.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.14.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::DevMap, programs::XdpContext};
///
/// #[map]
/// static MAP: DevMap = DevMap::with_max_entries(1, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// MAP.redirect(0, xdp_action::XDP_PASS as u64).unwrap_or(xdp_action::XDP_PASS)
/// }
/// ```
#[repr(transparent)]
pub struct DevMap {
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for DevMap {}
impl DevMap {
/// Creates a [`DevMap`] with a set maximum number of elements.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMap};
///
/// #[map]
/// static MAP: DevMap = DevMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> DevMap {
DevMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_devmap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
}),
}
}
/// Creates a [`DevMap`] with a set maximum number of elements that can be pinned to the BPF
/// File System (bpffs).
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMap};
///
/// #[map]
/// static MAP: DevMap = DevMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> DevMap {
DevMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_devmap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
}),
}
}
/// Retrieves the interface index at `index` in the array.
///
/// To actually redirect a packet, see [`DevMap::redirect`].
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMap};
///
/// #[map]
/// static MAP: DevMap = DevMap::with_max_entries(1, 0);
///
/// let target_if_index = MAP.get(0).unwrap().if_index;
///
/// // redirect to if_index
/// ```
#[inline(always)]
pub fn get(&self, index: u32) -> Option<DevMapValue> {
unsafe {
let value = bpf_map_lookup_elem(
self.def.get() as *mut _,
&index as *const _ as *const c_void,
);
NonNull::new(value as *mut bpf_devmap_val).map(|p| DevMapValue {
if_index: p.as_ref().ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/bpf.h#L6136
prog_id: NonZeroU32::new(p.as_ref().bpf_prog.id),
})
}
}
/// Redirects the current packet to the interface at `index`.
///
/// The lower two bits of `flags` are used for the return code if the map lookup fails, which
/// can be used as the XDP program's return code if the target interface cannot be found.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::DevMap, programs::XdpContext};
///
/// #[map]
/// static MAP: DevMap = DevMap::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[inline(always)]
pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
try_redirect_map(&self.def, index, flags)
}
}
#[derive(Clone, Copy)]
/// The value of a device map.
pub struct DevMapValue {
/// Target interface index to redirect to.
pub if_index: u32,
/// Chained XDP program ID.
pub prog_id: Option<NonZeroU32>,
}

@ -0,0 +1,146 @@
use core::{cell::UnsafeCell, mem, num::NonZeroU32, ptr::NonNull};
use aya_bpf_bindings::bindings::bpf_devmap_val;
use aya_bpf_cty::c_void;
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH},
helpers::bpf_map_lookup_elem,
maps::PinningType,
};
use super::{dev_map::DevMapValue, try_redirect_map};
/// A map of network devices.
///
/// XDP programs can use this map to redirect packets to other network devices. It is similar to
/// [`DevMap`](super::DevMap), but is a hash map rather than an array. Keys do not need to be
/// contiguous nor start at zero, but there is a hashing cost to every lookup.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 5.4.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::DevMapHash, programs::XdpContext};
///
/// #[map]
/// static MAP: DevMapHash = DevMapHash::with_max_entries(1, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// MAP.redirect(42, xdp_action::XDP_PASS as u64).unwrap_or(xdp_action::XDP_PASS)
/// }
/// ```
#[repr(transparent)]
pub struct DevMapHash {
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for DevMapHash {}
impl DevMapHash {
/// Creates a [`DevMapHash`] with a set maximum number of elements.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMapHash};
///
/// #[map]
/// static MAP: DevMapHash = DevMapHash::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> DevMapHash {
DevMapHash {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP_HASH,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_devmap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
}),
}
}
/// Creates a [`DevMapHash`] with a set maximum number of elements that can be pinned to the BPF
/// File System (bpffs).
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMapHash};
///
/// #[map]
/// static MAP: DevMapHash = DevMapHash::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> DevMapHash {
DevMapHash {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP_HASH,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<bpf_devmap_val>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
}),
}
}
/// Retrieves the interface index with `key` in the map.
///
/// To actually redirect a packet, see [`DevMapHash::redirect`].
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::DevMapHash};
///
/// #[map]
/// static MAP: DevMapHash = DevMapHash::with_max_entries(1, 0);
///
/// let target_if_index = MAP.get(42).unwrap().if_index;
///
/// // redirect to ifindex
/// ```
#[inline(always)]
pub fn get(&self, key: u32) -> Option<DevMapValue> {
unsafe {
let value =
bpf_map_lookup_elem(self.def.get() as *mut _, &key as *const _ as *const c_void);
NonNull::new(value as *mut bpf_devmap_val).map(|p| DevMapValue {
if_index: p.as_ref().ifindex,
// SAFETY: map writes use fd, map reads use id.
// https://elixir.bootlin.com/linux/v6.2/source/include/uapi/linux/bpf.h#L6136
prog_id: NonZeroU32::new(p.as_ref().bpf_prog.id),
})
}
}
/// Redirects the current packet to the interface keyed by `key`.
///
/// The lower two bits of `flags` are used for the return code if the map lookup fails, which
/// can be used as the XDP program's return code if the target interface cannot be found.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::DevMapHash, programs::XdpContext};
///
/// #[map]
/// static MAP: DevMapHash = DevMapHash::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(_ctx: XdpContext) -> u32 {
/// MAP.redirect(7, 0).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[inline(always)]
pub fn redirect(&self, key: u32, flags: u64) -> Result<u32, u32> {
try_redirect_map(&self.def, key, flags)
}
}

@ -0,0 +1,33 @@
mod cpu_map;
mod dev_map;
mod dev_map_hash;
mod xsk_map;
use core::cell::UnsafeCell;
use aya_bpf_bindings::{
bindings::{bpf_map_def, xdp_action::XDP_REDIRECT},
helpers::bpf_redirect_map,
};
pub use cpu_map::CpuMap;
pub use dev_map::DevMap;
pub use dev_map_hash::DevMapHash;
pub use xsk_map::XskMap;
/// Wrapper around the `bpf_redirect_map` function.
///
/// # Return value
///
/// - `Ok(XDP_REDIRECT)` on success.
/// - `Err` containing the lowest two bits of `flags` on failure.
#[inline(always)]
fn try_redirect_map(def: &UnsafeCell<bpf_map_def>, key: u32, flags: u64) -> Result<u32, u32> {
// Return XDP_REDIRECT on success, or the value of the two lower bits of the flags argument on
// error. The helper is declared to return a long (i64) even though these values fit in a u32,
// hence the unsigned_abs below.
let ret = unsafe { bpf_redirect_map(def.get() as *mut _, key.into(), flags) };
match ret.unsigned_abs() as u32 {
XDP_REDIRECT => Ok(XDP_REDIRECT),
ret => Err(ret),
}
}

@ -0,0 +1,164 @@
use core::{cell::UnsafeCell, mem, ptr::NonNull};
use aya_bpf_bindings::bindings::bpf_xdp_sock;
use aya_bpf_cty::c_void;
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_XSKMAP},
helpers::bpf_map_lookup_elem,
maps::PinningType,
};
use super::try_redirect_map;
/// An array of AF_XDP sockets.
///
/// XDP programs can use this map to redirect packets to a target AF_XDP socket using the
/// `XDP_REDIRECT` action.
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.18.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::XskMap, programs::XdpContext};
///
/// #[map]
/// static SOCKS: XskMap = XskMap::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(ctx: XdpContext) -> u32 {
/// let queue_id = unsafe { (*ctx.ctx).rx_queue_index };
/// SOCKS.redirect(queue_id, xdp_action::XDP_DROP as u64).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
///
/// # Queue management
///
/// Packets received on an RX queue can only be redirected to sockets bound on the same queue. Most
/// hardware NICs have multiple RX queues to spread the load across multiple CPU cores using RSS.
///
/// Three strategies are possible:
///
/// - Reduce the RX queue count to a single one. This option is great for development, but is
/// detrimental for performance as the single CPU core receiving packets will get overwhelmed.
/// Setting the queue count for a NIC can be achieved using `ethtool -L <ifname> combined 1`.
/// - Create a socket for every RX queue. Most modern NICs will have an RX queue per CPU thread, so
/// a socket per CPU thread is best for performance. To dynamically size the map depending on the
/// receive queue count, see the userspace documentation of `CpuMap`.
/// - Create a single socket and use a [`CpuMap`](super::CpuMap) to redirect the packet to the
/// correct CPU core. This way, the packet is sent to another CPU, and a chained XDP program can
/// then redirect to the AF_XDP socket. Using a single socket simplifies the userspace code but
/// will not perform well unless only a small amount of traffic is redirected to the socket. Regular
/// traffic however will not be impacted, contrary to reducing the queue count.
#[repr(transparent)]
pub struct XskMap {
def: UnsafeCell<bpf_map_def>,
}
unsafe impl Sync for XskMap {}
impl XskMap {
/// Creates a [`XskMap`] with a set maximum number of elements.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::XskMap};
///
/// #[map]
/// static SOCKS: XskMap = XskMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> XskMap {
XskMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_XSKMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::None as u32,
}),
}
}
/// Creates a [`XskMap`] with a set maximum number of elements that can be pinned to the BPF
/// filesystem (bpffs).
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::XskMap};
///
/// #[map]
/// static SOCKS: XskMap = XskMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> XskMap {
XskMap {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_XSKMAP,
key_size: mem::size_of::<u32>() as u32,
value_size: mem::size_of::<u32>() as u32,
max_entries,
map_flags: flags,
id: 0,
pinning: PinningType::ByName as u32,
}),
}
}
/// Retrieves the id of the queue to which the socket at `index` in the array is bound.
///
/// To actually redirect a packet, see [`XskMap::redirect`].
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{macros::map, maps::XskMap};
///
/// #[map]
/// static SOCKS: XskMap = XskMap::with_max_entries(8, 0);
///
/// let queue_id = SOCKS.get(0);
/// ```
#[inline(always)]
pub fn get(&self, index: u32) -> Option<u32> {
unsafe {
let value = bpf_map_lookup_elem(
self.def.get() as *mut _,
&index as *const _ as *const c_void,
);
NonNull::new(value as *mut bpf_xdp_sock).map(|p| p.as_ref().queue_id)
}
}
/// Redirects the current packet to the AF_XDP socket at `index`.
///
/// The lower two bits of `flags` are used for the return code if the map lookup fails, which
/// can be used as the XDP program's return code if a matching socket cannot be found.
///
/// However, if the socket at `index` is bound to a RX queue which is not the current RX queue,
/// the packet will be dropped.
///
/// # Examples
///
/// ```rust,no_run
/// use aya_bpf::{bindings::xdp_action, macros::{map, xdp}, maps::XskMap, programs::XdpContext};
///
/// #[map]
/// static SOCKS: XskMap = XskMap::with_max_entries(8, 0);
///
/// #[xdp]
/// fn xdp(ctx: XdpContext) -> u32 {
/// let queue_id = unsafe { (*ctx.ctx).rx_queue_index };
/// SOCKS.redirect(queue_id, 0).unwrap_or(xdp_action::XDP_DROP)
/// }
/// ```
#[inline(always)]
pub fn redirect(&self, index: u32, flags: u64) -> Result<u32, u32> {
try_redirect_map(&self.def, index, flags)
}
}

@ -43,3 +43,15 @@ path = "src/bpf_probe_read.rs"
[[bin]]
name = "two_progs"
path = "src/two_progs.rs"
[[bin]]
name = "redirect"
path = "src/redirect.rs"
[[bin]]
name = "xdp_sec"
path = "src/xdp_sec.rs"
[[bin]]
name = "stack_argument"
path = "src/stack_argument.rs"

@ -0,0 +1,73 @@
#![no_std]
#![no_main]
use aya_bpf::{
bindings::xdp_action,
macros::{map, xdp},
maps::{Array, CpuMap, DevMap, DevMapHash, XskMap},
programs::XdpContext,
};
#[map]
static SOCKS: XskMap = XskMap::with_max_entries(1, 0);
#[map]
static DEVS: DevMap = DevMap::with_max_entries(1, 0);
#[map]
static DEVS_HASH: DevMapHash = DevMapHash::with_max_entries(1, 0);
#[map]
static CPUS: CpuMap = CpuMap::with_max_entries(1, 0);
/// Hits of a probe, used to test program chaining through CpuMap/DevMap.
/// The first slot counts how many times the "raw" xdp program got executed, while the second slot
/// counts how many times the map programs got executed.
/// This allows the test harness to assert that a specific step got executed.
#[map]
static mut HITS: Array<u32> = Array::with_max_entries(2, 0);
#[xdp]
pub fn redirect_sock(_ctx: XdpContext) -> u32 {
SOCKS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp]
pub fn redirect_dev(_ctx: XdpContext) -> u32 {
inc_hit(0);
DEVS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp]
pub fn redirect_dev_hash(_ctx: XdpContext) -> u32 {
inc_hit(0);
DEVS_HASH.redirect(10, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp]
pub fn redirect_cpu(_ctx: XdpContext) -> u32 {
inc_hit(0);
CPUS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp(map = "cpumap")]
pub fn redirect_cpu_chain(_ctx: XdpContext) -> u32 {
inc_hit(1);
xdp_action::XDP_PASS
}
#[xdp(map = "devmap")]
pub fn redirect_dev_chain(_ctx: XdpContext) -> u32 {
inc_hit(1);
xdp_action::XDP_PASS
}
#[inline(always)]
fn inc_hit(index: u32) {
if let Some(hit) = unsafe { HITS.get_ptr_mut(index) } {
unsafe { *hit += 1 };
}
}
#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
loop {}
}

@ -0,0 +1,26 @@
#![no_std]
#![no_main]
use aya_bpf::{bindings::xdp_action::XDP_PASS, macros::xdp, programs::XdpContext};
macro_rules! probe {
($name:ident, ($($arg:ident $(= $value:literal)?),*) ) => {
#[xdp($($arg $(= $value)?),*)]
pub fn $name(_ctx: XdpContext) -> u32 {
XDP_PASS
}
};
}
probe!(xdp_plain, ());
probe!(xdp_frags, (frags));
probe!(xdp_cpumap, (map = "cpumap"));
probe!(xdp_devmap, (map = "devmap"));
probe!(xdp_frags_cpumap, (frags, map = "cpumap"));
probe!(xdp_frags_devmap, (frags, map = "devmap"));
#[cfg(not(test))]
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
loop {}
}

@ -17,7 +17,7 @@ struct {
__uint(max_entries, 1);
} map_2 SEC(".maps");
SEC("tracepoint")
SEC("uprobe")
int bpf_prog(void *ctx) {
__u32 key = 0;
__u64 twenty_four = 24;

@ -21,6 +21,8 @@ pub const RELOCATIONS: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "
pub const TWO_PROGS: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/two_progs"));
pub const BPF_PROBE_READ: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/bpf_probe_read"));
pub const REDIRECT: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/redirect"));
pub const XDP_SEC: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/xdp_sec"));
#[cfg(test)]
mod tests;

@ -7,3 +7,4 @@ mod rbpf;
mod relocations;
mod smoke;
mod stack_argument;
mod xdp;

@ -1,4 +1,8 @@
use std::{convert::TryInto as _, thread, time};
use std::{
convert::TryInto as _,
thread,
time::{Duration, SystemTime},
};
use aya::{
maps::Array,
@ -9,9 +13,10 @@ use aya::{
util::KernelVersion,
Bpf,
};
use aya_obj::programs::XdpAttachType;
const MAX_RETRIES: usize = 100;
const RETRY_DURATION: time::Duration = time::Duration::from_millis(10);
const RETRY_DURATION: Duration = Duration::from_millis(10);
#[test]
fn long_name() {
@ -36,11 +41,12 @@ fn multiple_btf_maps() {
let map_1: Array<_, u64> = bpf.take_map("map_1").unwrap().try_into().unwrap();
let map_2: Array<_, u64> = bpf.take_map("map_2").unwrap().try_into().unwrap();
let prog: &mut TracePoint = bpf.program_mut("bpf_prog").unwrap().try_into().unwrap();
let prog: &mut UProbe = bpf.program_mut("bpf_prog").unwrap().try_into().unwrap();
prog.load().unwrap();
prog.attach("sched", "sched_switch").unwrap();
prog.attach(Some("trigger_bpf_program"), 0, "/proc/self/exe", None)
.unwrap();
thread::sleep(time::Duration::from_secs(3));
trigger_bpf_program();
let key = 0;
let val_1 = map_1.get(&key, 0).unwrap();
@ -50,6 +56,12 @@ fn multiple_btf_maps() {
assert_eq!(val_2, 42);
}
#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_bpf_program() {
core::hint::black_box(trigger_bpf_program);
}
fn poll_loaded_program_id(name: &str) -> impl Iterator<Item = Option<u32>> + '_ {
std::iter::once(true)
.chain(std::iter::repeat(false))
@ -138,6 +150,28 @@ fn unload_xdp() {
assert_unloaded("pass");
}
#[test]
fn test_loaded_at() {
let mut bpf = Bpf::load(crate::TEST).unwrap();
let prog: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
let t1 = SystemTime::now();
prog.load().unwrap();
let t2 = SystemTime::now();
assert_loaded("pass");
let loaded_at = prog.info().unwrap().loaded_at();
let range = t1..t2;
assert!(
range.contains(&loaded_at),
"{range:?}.contains({loaded_at:?})"
);
prog.unload().unwrap();
assert_unloaded("pass");
}
#[test]
fn unload_kprobe() {
let mut bpf = Bpf::load(crate::TEST).unwrap();
@ -276,7 +310,7 @@ fn pin_lifecycle() {
// 2. Load program from bpffs but don't attach it
{
let _ = Xdp::from_pin("/sys/fs/bpf/aya-xdp-test-prog").unwrap();
let _ = Xdp::from_pin("/sys/fs/bpf/aya-xdp-test-prog", XdpAttachType::Interface).unwrap();
}
// should still be loaded since prog was pinned
@ -284,7 +318,8 @@ fn pin_lifecycle() {
// 3. Load program from bpffs and attach
{
let mut prog = Xdp::from_pin("/sys/fs/bpf/aya-xdp-test-prog").unwrap();
let mut prog =
Xdp::from_pin("/sys/fs/bpf/aya-xdp-test-prog", XdpAttachType::Interface).unwrap();
let link_id = prog.attach("lo", XdpFlags::default()).unwrap();
let link = prog.take_link(link_id).unwrap();
let fd_link: FdLink = link.try_into().unwrap();

@ -2,7 +2,7 @@ use core::{mem::size_of, ptr::null_mut, slice::from_raw_parts};
use std::collections::HashMap;
use assert_matches::assert_matches;
use aya_obj::{generated::bpf_insn, Object, ProgramSection};
use aya_obj::{generated::bpf_insn, programs::XdpAttachType, Object, ProgramSection};
#[test]
fn run_with_rbpf() {
@ -11,7 +11,10 @@ fn run_with_rbpf() {
assert_eq!(object.programs.len(), 1);
assert_matches!(
object.programs["pass"].section,
ProgramSection::Xdp { frags: true }
ProgramSection::Xdp {
frags: true,
attach_type: XdpAttachType::Interface
}
);
let instructions = &object
@ -40,7 +43,7 @@ fn use_map_with_rbpf() {
assert_eq!(object.programs.len(), 1);
assert_matches!(
object.programs["bpf_prog"].section,
ProgramSection::TracePoint { .. }
ProgramSection::UProbe { .. }
);
// Initialize maps:
@ -58,7 +61,7 @@ fn use_map_with_rbpf() {
);
let map_id = if name == "map_1" { 0 } else { 1 };
let fd = map_id as i32 | 0xCAFE00;
let fd = map_id as std::os::fd::RawFd | 0xCAFE00;
maps.insert(name.to_owned(), (fd, map.clone()));
unsafe {

@ -1,5 +1,3 @@
use std::time::Duration;
use aya::{programs::UProbe, Bpf};
#[test]
@ -7,7 +5,6 @@ fn relocations() {
let bpf = load_and_attach("test_64_32_call_relocs", crate::RELOCATIONS);
trigger_relocations_program();
std::thread::sleep(Duration::from_millis(100));
let m = aya::maps::Array::<_, u64>::try_from(bpf.map("RESULTS").unwrap()).unwrap();
assert_eq!(m.get(&0, 0).unwrap(), 1);
@ -24,7 +21,6 @@ fn text_64_64_reloc() {
m.set(1, 2, 0).unwrap();
trigger_relocations_program();
std::thread::sleep(Duration::from_millis(100));
assert_eq!(m.get(&0, 0).unwrap(), 2);
assert_eq!(m.get(&1, 0).unwrap(), 3);

@ -0,0 +1,98 @@
use std::{net::UdpSocket, time::Duration};
use aya::{
maps::{Array, CpuMap},
programs::{Xdp, XdpFlags},
Bpf,
};
use object::{Object, ObjectSection, ObjectSymbol, SymbolSection};
use crate::utils::NetNsGuard;
#[test]
fn prog_sections() {
let obj_file = object::File::parse(crate::XDP_SEC).unwrap();
ensure_symbol(&obj_file, "xdp", "xdp_plain");
ensure_symbol(&obj_file, "xdp.frags", "xdp_frags");
ensure_symbol(&obj_file, "xdp/cpumap", "xdp_cpumap");
ensure_symbol(&obj_file, "xdp/devmap", "xdp_devmap");
ensure_symbol(&obj_file, "xdp.frags/cpumap", "xdp_frags_cpumap");
ensure_symbol(&obj_file, "xdp.frags/devmap", "xdp_frags_devmap");
}
#[track_caller]
fn ensure_symbol(obj_file: &object::File, sec_name: &str, sym_name: &str) {
let sec = obj_file.section_by_name(sec_name).unwrap_or_else(|| {
let secs = obj_file
.sections()
.flat_map(|sec| sec.name().ok().map(|name| name.to_owned()))
.collect::<Vec<_>>();
panic!("section {sec_name} not found. available sections: {secs:?}");
});
let sec = SymbolSection::Section(sec.index());
let syms = obj_file
.symbols()
.filter(|sym| sym.section() == sec)
.filter_map(|sym| sym.name().ok())
.collect::<Vec<_>>();
assert!(
syms.contains(&sym_name),
"symbol not found. available symbols in section: {syms:?}"
);
}
#[test]
fn map_load() {
let bpf = Bpf::load(crate::XDP_SEC).unwrap();
bpf.program("xdp_plain").unwrap();
bpf.program("xdp_frags").unwrap();
bpf.program("xdp_cpumap").unwrap();
bpf.program("xdp_devmap").unwrap();
bpf.program("xdp_frags_cpumap").unwrap();
bpf.program("xdp_frags_devmap").unwrap();
}
#[test]
fn cpumap_chain() {
let _netns = NetNsGuard::new();
let mut bpf = Bpf::load(crate::REDIRECT).unwrap();
// Load our cpumap and our canary map
let mut cpus: CpuMap<_> = bpf.take_map("CPUS").unwrap().try_into().unwrap();
let hits: Array<_, u32> = bpf.take_map("HITS").unwrap().try_into().unwrap();
let xdp_chain_fd = {
// Load the chained program to run on the target CPU
let xdp: &mut Xdp = bpf
.program_mut("redirect_cpu_chain")
.unwrap()
.try_into()
.unwrap();
xdp.load().unwrap();
xdp.fd().unwrap()
};
cpus.set(0, 2048, Some(xdp_chain_fd), 0).unwrap();
// Load the main program
let xdp: &mut Xdp = bpf.program_mut("redirect_cpu").unwrap().try_into().unwrap();
xdp.load().unwrap();
xdp.attach("lo", XdpFlags::default()).unwrap();
let sock = UdpSocket::bind("127.0.0.1:1777").unwrap();
sock.set_read_timeout(Some(Duration::from_millis(1)))
.unwrap();
sock.send_to(b"hello cpumap", "127.0.0.1:1777").unwrap();
// Read back the packet to ensure it went through the entire network stack, including our two
// probes.
let mut buf = vec![0u8; 1000];
let n = sock.recv(&mut buf).unwrap();
assert_eq!(&buf[..n], b"hello cpumap");
assert_eq!(hits.get(&0, 0).unwrap(), 1);
assert_eq!(hits.get(&1, 0).unwrap(), 1);
}

@ -563,6 +563,114 @@ impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::stack_trace::StackTrace wh
pub fn aya_bpf::maps::stack_trace::StackTrace::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::stack_trace::StackTrace
pub fn aya_bpf::maps::stack_trace::StackTrace::from(t: T) -> T
pub mod aya_bpf::maps::xdp
#[repr(transparent)] pub struct aya_bpf::maps::xdp::CpuMap
impl aya_bpf::maps::CpuMap
pub const fn aya_bpf::maps::CpuMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::CpuMap
pub fn aya_bpf::maps::CpuMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::CpuMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::CpuMap
impl core::marker::Sync for aya_bpf::maps::CpuMap
impl core::marker::Send for aya_bpf::maps::CpuMap
impl core::marker::Unpin for aya_bpf::maps::CpuMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::CpuMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::CpuMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::CpuMap where U: core::convert::From<T>
pub fn aya_bpf::maps::CpuMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::CpuMap where U: core::convert::Into<T>
pub type aya_bpf::maps::CpuMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::CpuMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::CpuMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::CpuMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::CpuMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::CpuMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::CpuMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::CpuMap where T: core::marker::Sized
pub fn aya_bpf::maps::CpuMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::CpuMap where T: core::marker::Sized
pub fn aya_bpf::maps::CpuMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::CpuMap
pub fn aya_bpf::maps::CpuMap::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::xdp::DevMap
impl aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::get(&self, index: u32) -> core::option::Option<DevMapValue>
pub const fn aya_bpf::maps::DevMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::DevMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMap
impl core::marker::Sync for aya_bpf::maps::DevMap
impl core::marker::Send for aya_bpf::maps::DevMap
impl core::marker::Unpin for aya_bpf::maps::DevMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::DevMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::DevMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::DevMap where U: core::convert::From<T>
pub fn aya_bpf::maps::DevMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::DevMap where U: core::convert::Into<T>
pub type aya_bpf::maps::DevMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::DevMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::DevMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::DevMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::DevMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::DevMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::DevMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::DevMap where T: core::marker::Sized
pub fn aya_bpf::maps::DevMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::DevMap where T: core::marker::Sized
pub fn aya_bpf::maps::DevMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::xdp::DevMapHash
impl aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::get(&self, key: u32) -> core::option::Option<DevMapValue>
pub const fn aya_bpf::maps::DevMapHash::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::redirect(&self, key: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::DevMapHash::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMapHash
impl core::marker::Sync for aya_bpf::maps::DevMapHash
impl core::marker::Send for aya_bpf::maps::DevMapHash
impl core::marker::Unpin for aya_bpf::maps::DevMapHash
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::DevMapHash
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::DevMapHash
impl<T, U> core::convert::Into<U> for aya_bpf::maps::DevMapHash where U: core::convert::From<T>
pub fn aya_bpf::maps::DevMapHash::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::DevMapHash where U: core::convert::Into<T>
pub type aya_bpf::maps::DevMapHash::Error = core::convert::Infallible
pub fn aya_bpf::maps::DevMapHash::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::DevMapHash where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::DevMapHash::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::DevMapHash::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::DevMapHash where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::DevMapHash where T: core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::DevMapHash where T: core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::from(t: T) -> T
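`DevMapHash` mirrors `DevMap` but is keyed by an arbitrary `u32` rather than a dense index. A hedged sketch on the same assumptions as the `DevMap` example above; the map and program names are invented.

```rust
use aya_bpf::{
    bindings::xdp_action,
    macros::{map, xdp},
    maps::DevMapHash,
    programs::XdpContext,
};

// Keys can be sparse, e.g. real ifindex values rather than 0..n slots.
#[map]
static TARGETS: DevMapHash = DevMapHash::with_max_entries(64, 0);

#[xdp]
pub fn redirect_by_key(_ctx: XdpContext) -> u32 {
    // Redirect to whatever device is stored under key 42, or let the
    // frame continue up the stack if there is no entry.
    TARGETS.redirect(42, 0).unwrap_or(xdp_action::XDP_PASS)
}
```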
#[repr(transparent)] pub struct aya_bpf::maps::xdp::XskMap
impl aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::get(&self, index: u32) -> core::option::Option<u32>
pub const fn aya_bpf::maps::XskMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::XskMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::XskMap
impl core::marker::Sync for aya_bpf::maps::XskMap
impl core::marker::Send for aya_bpf::maps::XskMap
impl core::marker::Unpin for aya_bpf::maps::XskMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::XskMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::XskMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::XskMap where U: core::convert::From<T>
pub fn aya_bpf::maps::XskMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::XskMap where U: core::convert::Into<T>
pub type aya_bpf::maps::XskMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::XskMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::XskMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::XskMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::XskMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::XskMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::XskMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::XskMap where T: core::marker::Sized
pub fn aya_bpf::maps::XskMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::XskMap where T: core::marker::Sized
pub fn aya_bpf::maps::XskMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::from(t: T) -> T
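`XskMap` hands frames to AF_XDP sockets: `get` returns the value stored at an index, and `redirect` forwards the current frame to the socket bound there. Another invented sketch, same assumptions as above.

```rust
use aya_bpf::{
    bindings::xdp_action,
    macros::{map, xdp},
    maps::XskMap,
    programs::XdpContext,
};

// Userspace binds one AF_XDP socket per RX queue into this map.
#[map]
static SOCKETS: XskMap = XskMap::with_max_entries(8, 0);

#[xdp]
pub fn to_af_xdp(_ctx: XdpContext) -> u32 {
    // Forward to the socket in slot 0, or pass the frame up the stack
    // if nothing is bound there.
    SOCKETS.redirect(0, 0).unwrap_or(xdp_action::XDP_PASS)
}
```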
#[repr(transparent)] pub struct aya_bpf::maps::Array<T>
impl<T> aya_bpf::maps::array::Array<T>
pub fn aya_bpf::maps::array::Array<T>::get(&self, index: u32) -> core::option::Option<&T>
@ -618,6 +726,86 @@ impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::bloom_filter::BloomFilter<
pub fn aya_bpf::maps::bloom_filter::BloomFilter<T>::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::bloom_filter::BloomFilter<T>
pub fn aya_bpf::maps::bloom_filter::BloomFilter<T>::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::CpuMap
impl aya_bpf::maps::CpuMap
pub const fn aya_bpf::maps::CpuMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::CpuMap
pub fn aya_bpf::maps::CpuMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::CpuMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::CpuMap
impl core::marker::Sync for aya_bpf::maps::CpuMap
impl core::marker::Send for aya_bpf::maps::CpuMap
impl core::marker::Unpin for aya_bpf::maps::CpuMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::CpuMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::CpuMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::CpuMap where U: core::convert::From<T>
pub fn aya_bpf::maps::CpuMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::CpuMap where U: core::convert::Into<T>
pub type aya_bpf::maps::CpuMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::CpuMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::CpuMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::CpuMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::CpuMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::CpuMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::CpuMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::CpuMap where T: core::marker::Sized
pub fn aya_bpf::maps::CpuMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::CpuMap where T: core::marker::Sized
pub fn aya_bpf::maps::CpuMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::CpuMap
pub fn aya_bpf::maps::CpuMap::from(t: T) -> T
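`CpuMap` only exposes `redirect` on the eBPF side; its entries are populated from userspace. A hedged sketch with invented names, under the same assumptions as the earlier map examples.

```rust
use aya_bpf::{
    bindings::xdp_action,
    macros::{map, xdp},
    maps::CpuMap,
    programs::XdpContext,
};

// Slots are filled from userspace with the CPUs that should do the work.
#[map]
static CPUS: CpuMap = CpuMap::with_max_entries(4, 0);

#[xdp]
pub fn steer(_ctx: XdpContext) -> u32 {
    // Queue the frame for processing on the CPU registered in slot 0.
    CPUS.redirect(0, 0).unwrap_or(xdp_action::XDP_PASS)
}
```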
#[repr(transparent)] pub struct aya_bpf::maps::DevMap
impl aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::get(&self, index: u32) -> core::option::Option<DevMapValue>
pub const fn aya_bpf::maps::DevMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::DevMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMap
impl core::marker::Sync for aya_bpf::maps::DevMap
impl core::marker::Send for aya_bpf::maps::DevMap
impl core::marker::Unpin for aya_bpf::maps::DevMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::DevMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::DevMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::DevMap where U: core::convert::From<T>
pub fn aya_bpf::maps::DevMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::DevMap where U: core::convert::Into<T>
pub type aya_bpf::maps::DevMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::DevMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::DevMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::DevMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::DevMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::DevMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::DevMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::DevMap where T: core::marker::Sized
pub fn aya_bpf::maps::DevMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::DevMap where T: core::marker::Sized
pub fn aya_bpf::maps::DevMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::DevMap
pub fn aya_bpf::maps::DevMap::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::DevMapHash
impl aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::get(&self, key: u32) -> core::option::Option<DevMapValue>
pub const fn aya_bpf::maps::DevMapHash::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::redirect(&self, key: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::DevMapHash::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::DevMapHash
impl core::marker::Sync for aya_bpf::maps::DevMapHash
impl core::marker::Send for aya_bpf::maps::DevMapHash
impl core::marker::Unpin for aya_bpf::maps::DevMapHash
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::DevMapHash
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::DevMapHash
impl<T, U> core::convert::Into<U> for aya_bpf::maps::DevMapHash where U: core::convert::From<T>
pub fn aya_bpf::maps::DevMapHash::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::DevMapHash where U: core::convert::Into<T>
pub type aya_bpf::maps::DevMapHash::Error = core::convert::Infallible
pub fn aya_bpf::maps::DevMapHash::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::DevMapHash where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::DevMapHash::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::DevMapHash::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::DevMapHash where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::DevMapHash where T: core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::DevMapHash where T: core::marker::Sized
pub fn aya_bpf::maps::DevMapHash::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::DevMapHash
pub fn aya_bpf::maps::DevMapHash::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::HashMap<K, V>
impl<K, V> aya_bpf::maps::hash_map::HashMap<K, V>
pub unsafe fn aya_bpf::maps::hash_map::HashMap<K, V>::get(&self, key: &K) -> core::option::Option<&V>
@ -1014,6 +1202,33 @@ impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::stack_trace::StackTrace wh
pub fn aya_bpf::maps::stack_trace::StackTrace::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::stack_trace::StackTrace
pub fn aya_bpf::maps::stack_trace::StackTrace::from(t: T) -> T
#[repr(transparent)] pub struct aya_bpf::maps::XskMap
impl aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::get(&self, index: u32) -> core::option::Option<u32>
pub const fn aya_bpf::maps::XskMap::pinned(max_entries: u32, flags: u32) -> aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::redirect(&self, index: u32, flags: u64) -> core::result::Result<u32, u32>
pub const fn aya_bpf::maps::XskMap::with_max_entries(max_entries: u32, flags: u32) -> aya_bpf::maps::XskMap
impl core::marker::Sync for aya_bpf::maps::XskMap
impl core::marker::Send for aya_bpf::maps::XskMap
impl core::marker::Unpin for aya_bpf::maps::XskMap
impl !core::panic::unwind_safe::RefUnwindSafe for aya_bpf::maps::XskMap
impl core::panic::unwind_safe::UnwindSafe for aya_bpf::maps::XskMap
impl<T, U> core::convert::Into<U> for aya_bpf::maps::XskMap where U: core::convert::From<T>
pub fn aya_bpf::maps::XskMap::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_bpf::maps::XskMap where U: core::convert::Into<T>
pub type aya_bpf::maps::XskMap::Error = core::convert::Infallible
pub fn aya_bpf::maps::XskMap::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_bpf::maps::XskMap where U: core::convert::TryFrom<T>
pub type aya_bpf::maps::XskMap::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_bpf::maps::XskMap::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> core::any::Any for aya_bpf::maps::XskMap where T: 'static + core::marker::Sized
pub fn aya_bpf::maps::XskMap::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_bpf::maps::XskMap where T: core::marker::Sized
pub fn aya_bpf::maps::XskMap::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_bpf::maps::XskMap where T: core::marker::Sized
pub fn aya_bpf::maps::XskMap::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_bpf::maps::XskMap
pub fn aya_bpf::maps::XskMap::from(t: T) -> T
pub mod aya_bpf::programs
pub mod aya_bpf::programs::device
pub struct aya_bpf::programs::device::DeviceContext

@ -1342,6 +1342,8 @@ impl core::convert::From<aya_obj::programs::cgroup_sock_addr::CgroupSockAddrAtta
pub fn aya_obj::generated::bpf_attach_type::from(s: aya_obj::programs::cgroup_sock_addr::CgroupSockAddrAttachType) -> aya_obj::generated::bpf_attach_type
impl core::convert::From<aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType> for aya_obj::generated::bpf_attach_type
pub fn aya_obj::generated::bpf_attach_type::from(s: aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType) -> aya_obj::generated::bpf_attach_type
impl core::convert::From<aya_obj::programs::xdp::XdpAttachType> for aya_obj::generated::bpf_attach_type
pub fn aya_obj::generated::bpf_attach_type::from(value: aya_obj::programs::xdp::XdpAttachType) -> Self
impl core::clone::Clone for aya_obj::generated::bpf_attach_type
pub fn aya_obj::generated::bpf_attach_type::clone(&self) -> aya_obj::generated::bpf_attach_type
impl core::cmp::Eq for aya_obj::generated::bpf_attach_type
@ -5411,6 +5413,7 @@ pub fn aya_obj::maps::Map::pinning(&self) -> aya_obj::maps::PinningType
pub fn aya_obj::maps::Map::section_index(&self) -> usize
pub fn aya_obj::maps::Map::section_kind(&self) -> aya_obj::BpfSectionKind
pub fn aya_obj::maps::Map::set_max_entries(&mut self, v: u32)
pub fn aya_obj::maps::Map::set_value_size(&mut self, size: u32)
pub fn aya_obj::maps::Map::symbol_index(&self) -> core::option::Option<usize>
pub fn aya_obj::maps::Map::value_size(&self) -> u32
impl core::clone::Clone for aya_obj::maps::Map
@ -5848,6 +5851,7 @@ pub aya_obj::obj::ProgramSection::UProbe::sleepable: bool
pub aya_obj::obj::ProgramSection::URetProbe
pub aya_obj::obj::ProgramSection::URetProbe::sleepable: bool
pub aya_obj::obj::ProgramSection::Xdp
pub aya_obj::obj::ProgramSection::Xdp::attach_type: aya_obj::programs::xdp::XdpAttachType
pub aya_obj::obj::ProgramSection::Xdp::frags: bool
impl core::str::traits::FromStr for aya_obj::ProgramSection
pub type aya_obj::ProgramSection::Err = aya_obj::ParseError
@ -5889,6 +5893,8 @@ pub fn aya_obj::Features::bpf_name(&self) -> bool
pub fn aya_obj::Features::bpf_perf_link(&self) -> bool
pub fn aya_obj::Features::bpf_probe_read_kernel(&self) -> bool
pub fn aya_obj::Features::btf(&self) -> core::option::Option<&aya_obj::btf::BtfFeatures>
pub fn aya_obj::Features::cpumap_prog_id(&self) -> bool
pub fn aya_obj::Features::devmap_prog_id(&self) -> bool
impl core::default::Default for aya_obj::Features
pub fn aya_obj::Features::default() -> aya_obj::Features
impl core::fmt::Debug for aya_obj::Features
@ -5972,7 +5978,7 @@ impl aya_obj::Object
pub fn aya_obj::Object::relocate_btf(&mut self, target_btf: &aya_obj::btf::Btf) -> core::result::Result<(), aya_obj::btf::BtfRelocationError>
impl aya_obj::Object
pub fn aya_obj::Object::relocate_calls(&mut self, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
pub fn aya_obj::Object::relocate_maps<'a, I: core::iter::traits::iterator::Iterator<Item = (&'a str, i32, &'a aya_obj::maps::Map)>>(&mut self, maps: I, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
pub fn aya_obj::Object::relocate_maps<'a, I: core::iter::traits::iterator::Iterator<Item = (&'a str, std::os::fd::raw::RawFd, &'a aya_obj::maps::Map)>>(&mut self, maps: I, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
impl core::clone::Clone for aya_obj::Object
pub fn aya_obj::Object::clone(&self) -> aya_obj::Object
impl core::fmt::Debug for aya_obj::Object
@ -6164,6 +6170,43 @@ impl<T> core::borrow::BorrowMut<T> for aya_obj::programs::cgroup_sockopt::Cgroup
pub fn aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType
pub fn aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType::from(t: T) -> T
pub mod aya_obj::programs::xdp
pub enum aya_obj::programs::xdp::XdpAttachType
pub aya_obj::programs::xdp::XdpAttachType::CpuMap
pub aya_obj::programs::xdp::XdpAttachType::DevMap
pub aya_obj::programs::xdp::XdpAttachType::Interface
impl core::convert::From<aya_obj::programs::xdp::XdpAttachType> for aya_obj::generated::bpf_attach_type
pub fn aya_obj::generated::bpf_attach_type::from(value: aya_obj::programs::xdp::XdpAttachType) -> Self
impl core::clone::Clone for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::clone(&self) -> aya_obj::programs::xdp::XdpAttachType
impl core::fmt::Debug for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result
impl core::marker::Copy for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Send for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Sync for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Unpin for aya_obj::programs::xdp::XdpAttachType
impl core::panic::unwind_safe::RefUnwindSafe for aya_obj::programs::xdp::XdpAttachType
impl core::panic::unwind_safe::UnwindSafe for aya_obj::programs::xdp::XdpAttachType
impl<T, U> core::convert::Into<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::From<T>
pub fn aya_obj::programs::xdp::XdpAttachType::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::Into<T>
pub type aya_obj::programs::xdp::XdpAttachType::Error = core::convert::Infallible
pub fn aya_obj::programs::xdp::XdpAttachType::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::TryFrom<T>
pub type aya_obj::programs::xdp::XdpAttachType::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_obj::programs::xdp::XdpAttachType::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> alloc::borrow::ToOwned for aya_obj::programs::xdp::XdpAttachType where T: core::clone::Clone
pub type aya_obj::programs::xdp::XdpAttachType::Owned = T
pub fn aya_obj::programs::xdp::XdpAttachType::clone_into(&self, target: &mut T)
pub fn aya_obj::programs::xdp::XdpAttachType::to_owned(&self) -> T
impl<T> core::any::Any for aya_obj::programs::xdp::XdpAttachType where T: 'static + core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_obj::programs::xdp::XdpAttachType where T: core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_obj::programs::xdp::XdpAttachType where T: core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::from(t: T) -> T
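The new `XdpAttachType` enum records whether an XDP program targets an interface, a `CpuMap`, or a `DevMap`, and the listing adds a `From` impl into the generated `bpf_attach_type`. A small sketch of that conversion; the helper function is hypothetical.

```rust
// Illustrative only: attach_type_for is a made-up helper; the From impl
// it relies on is the one added in this diff.
use aya_obj::{generated::bpf_attach_type, programs::xdp::XdpAttachType};

fn attach_type_for(target: XdpAttachType) -> bpf_attach_type {
    // Interface, CpuMap and DevMap each convert to their own attach type.
    target.into()
}
```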
pub enum aya_obj::programs::CgroupSockAddrAttachType
pub aya_obj::programs::CgroupSockAddrAttachType::Bind4
pub aya_obj::programs::CgroupSockAddrAttachType::Bind6
@ -6283,6 +6326,42 @@ impl<T> core::borrow::BorrowMut<T> for aya_obj::programs::cgroup_sockopt::Cgroup
pub fn aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType
pub fn aya_obj::programs::cgroup_sockopt::CgroupSockoptAttachType::from(t: T) -> T
pub enum aya_obj::programs::XdpAttachType
pub aya_obj::programs::XdpAttachType::CpuMap
pub aya_obj::programs::XdpAttachType::DevMap
pub aya_obj::programs::XdpAttachType::Interface
impl core::convert::From<aya_obj::programs::xdp::XdpAttachType> for aya_obj::generated::bpf_attach_type
pub fn aya_obj::generated::bpf_attach_type::from(value: aya_obj::programs::xdp::XdpAttachType) -> Self
impl core::clone::Clone for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::clone(&self) -> aya_obj::programs::xdp::XdpAttachType
impl core::fmt::Debug for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result
impl core::marker::Copy for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Send for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Sync for aya_obj::programs::xdp::XdpAttachType
impl core::marker::Unpin for aya_obj::programs::xdp::XdpAttachType
impl core::panic::unwind_safe::RefUnwindSafe for aya_obj::programs::xdp::XdpAttachType
impl core::panic::unwind_safe::UnwindSafe for aya_obj::programs::xdp::XdpAttachType
impl<T, U> core::convert::Into<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::From<T>
pub fn aya_obj::programs::xdp::XdpAttachType::into(self) -> U
impl<T, U> core::convert::TryFrom<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::Into<T>
pub type aya_obj::programs::xdp::XdpAttachType::Error = core::convert::Infallible
pub fn aya_obj::programs::xdp::XdpAttachType::try_from(value: U) -> core::result::Result<T, <T as core::convert::TryFrom<U>>::Error>
impl<T, U> core::convert::TryInto<U> for aya_obj::programs::xdp::XdpAttachType where U: core::convert::TryFrom<T>
pub type aya_obj::programs::xdp::XdpAttachType::Error = <U as core::convert::TryFrom<T>>::Error
pub fn aya_obj::programs::xdp::XdpAttachType::try_into(self) -> core::result::Result<U, <U as core::convert::TryFrom<T>>::Error>
impl<T> alloc::borrow::ToOwned for aya_obj::programs::xdp::XdpAttachType where T: core::clone::Clone
pub type aya_obj::programs::xdp::XdpAttachType::Owned = T
pub fn aya_obj::programs::xdp::XdpAttachType::clone_into(&self, target: &mut T)
pub fn aya_obj::programs::xdp::XdpAttachType::to_owned(&self) -> T
impl<T> core::any::Any for aya_obj::programs::xdp::XdpAttachType where T: 'static + core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::type_id(&self) -> core::any::TypeId
impl<T> core::borrow::Borrow<T> for aya_obj::programs::xdp::XdpAttachType where T: core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::borrow(&self) -> &T
impl<T> core::borrow::BorrowMut<T> for aya_obj::programs::xdp::XdpAttachType where T: core::marker::Sized
pub fn aya_obj::programs::xdp::XdpAttachType::borrow_mut(&mut self) -> &mut T
impl<T> core::convert::From<T> for aya_obj::programs::xdp::XdpAttachType
pub fn aya_obj::programs::xdp::XdpAttachType::from(t: T) -> T
pub mod aya_obj::relocation
pub enum aya_obj::relocation::RelocationError
pub aya_obj::relocation::RelocationError::InvalidRelocationOffset
@ -6420,6 +6499,7 @@ pub fn aya_obj::maps::Map::pinning(&self) -> aya_obj::maps::PinningType
pub fn aya_obj::maps::Map::section_index(&self) -> usize
pub fn aya_obj::maps::Map::section_kind(&self) -> aya_obj::BpfSectionKind
pub fn aya_obj::maps::Map::set_max_entries(&mut self, v: u32)
pub fn aya_obj::maps::Map::set_value_size(&mut self, size: u32)
pub fn aya_obj::maps::Map::symbol_index(&self) -> core::option::Option<usize>
pub fn aya_obj::maps::Map::value_size(&self) -> u32
impl core::clone::Clone for aya_obj::maps::Map
@ -6560,6 +6640,7 @@ pub aya_obj::ProgramSection::UProbe::sleepable: bool
pub aya_obj::ProgramSection::URetProbe
pub aya_obj::ProgramSection::URetProbe::sleepable: bool
pub aya_obj::ProgramSection::Xdp
pub aya_obj::ProgramSection::Xdp::attach_type: aya_obj::programs::xdp::XdpAttachType
pub aya_obj::ProgramSection::Xdp::frags: bool
impl core::str::traits::FromStr for aya_obj::ProgramSection
pub type aya_obj::ProgramSection::Err = aya_obj::ParseError
@ -6601,6 +6682,8 @@ pub fn aya_obj::Features::bpf_name(&self) -> bool
pub fn aya_obj::Features::bpf_perf_link(&self) -> bool
pub fn aya_obj::Features::bpf_probe_read_kernel(&self) -> bool
pub fn aya_obj::Features::btf(&self) -> core::option::Option<&aya_obj::btf::BtfFeatures>
pub fn aya_obj::Features::cpumap_prog_id(&self) -> bool
pub fn aya_obj::Features::devmap_prog_id(&self) -> bool
impl core::default::Default for aya_obj::Features
pub fn aya_obj::Features::default() -> aya_obj::Features
impl core::fmt::Debug for aya_obj::Features
@ -6684,7 +6767,7 @@ impl aya_obj::Object
pub fn aya_obj::Object::relocate_btf(&mut self, target_btf: &aya_obj::btf::Btf) -> core::result::Result<(), aya_obj::btf::BtfRelocationError>
impl aya_obj::Object
pub fn aya_obj::Object::relocate_calls(&mut self, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
pub fn aya_obj::Object::relocate_maps<'a, I: core::iter::traits::iterator::Iterator<Item = (&'a str, i32, &'a aya_obj::maps::Map)>>(&mut self, maps: I, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
pub fn aya_obj::Object::relocate_maps<'a, I: core::iter::traits::iterator::Iterator<Item = (&'a str, std::os::fd::raw::RawFd, &'a aya_obj::maps::Map)>>(&mut self, maps: I, text_sections: &std::collections::hash::set::HashSet<usize>) -> core::result::Result<(), aya_obj::relocation::BpfRelocationError>
impl core::clone::Clone for aya_obj::Object
pub fn aya_obj::Object::clone(&self) -> aya_obj::Object
impl core::fmt::Debug for aya_obj::Object
