aya: add feature probing for map type

Add an API that probes whether the kernel supports a given map type.

Assertions for `InodeStorage` are disabled because they require
CONFIG_BPF_LSM to be enabled, which is not the default in VM tests.
pull/1063/head
Tyrone Wu 3 months ago
parent 359b3eafe1
commit 27c28a60ed
No known key found for this signature in database
GPG Key ID: 978B1A1B79210AD6

@ -677,7 +677,7 @@ pub(crate) fn bpf_load_btf(
} }
// SAFETY: only use for bpf_cmd that return a new file descriptor on success. // SAFETY: only use for bpf_cmd that return a new file descriptor on success.
unsafe fn fd_sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> SysResult<crate::MockableFd> { pub(super) unsafe fn fd_sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> SysResult<crate::MockableFd> {
let fd = sys_bpf(cmd, attr)?; let fd = sys_bpf(cmd, attr)?;
let fd = fd.try_into().map_err(|_| { let fd = fd.try_into().map_err(|_| {
( (

@ -1,12 +1,19 @@
//! Probes and identifies available eBPF features supported by the host kernel. //! Probes and identifies available eBPF features supported by the host kernel.
use std::mem; use std::{mem, os::fd::AsRawFd};
use aya_obj::generated::{bpf_attach_type, bpf_attr, bpf_insn, BPF_F_SLEEPABLE}; use aya_obj::generated::{
bpf_attach_type, bpf_attr, bpf_cmd, bpf_insn, BPF_F_MMAPABLE, BPF_F_NO_PREALLOC,
BPF_F_SLEEPABLE,
};
use libc::{E2BIG, EINVAL}; use libc::{E2BIG, EINVAL};
use super::{bpf_prog_load, SyscallError}; use super::{bpf_prog_load, fd_sys_bpf, SyscallError};
use crate::{programs::ProgramType, util::KernelVersion}; use crate::{
maps::MapType,
programs::ProgramType,
util::{page_size, KernelVersion},
};
const RETURN_ZERO_INSNS: &[bpf_insn] = &[ const RETURN_ZERO_INSNS: &[bpf_insn] = &[
bpf_insn::new(0xb7, 0, 0, 0, 0), // mov64 r0 = 0 bpf_insn::new(0xb7, 0, 0, 0, 0), // mov64 r0 = 0
@ -107,3 +114,143 @@ pub fn is_program_supported(program_type: ProgramType) -> Result<bool, SyscallEr
}), }),
} }
} }
/// Whether the host kernel supports the [`MapType`].
///
/// # Examples
///
/// ```no_run
/// # use aya::{
/// #     maps::MapType,
/// #     sys::feature_probe::is_map_supported,
/// # };
/// #
/// match is_map_supported(MapType::HashOfMaps) {
///     Ok(true) => println!("hash_of_maps supported :)"),
///     Ok(false) => println!("hash_of_maps not supported :("),
///     Err(err) => println!("Uh oh! Unexpected error: {:?}", err),
/// }
/// ```
///
/// # Errors
///
/// Returns [`SyscallError`] if kernel probing fails with an unexpected error.
///
/// Note that certain errors are expected and handled internally; only
/// unanticipated failures during probing will result in this error.
pub fn is_map_supported(map_type: MapType) -> Result<bool, SyscallError> {
    if matches!(map_type, MapType::Unspecified) {
        return Ok(false);
    }

    // Local storage maps are probed by deliberately tripping `E2BIG` inside
    // the kernel's `bpf_local_storage_map_alloc_check()` (oversized value).
    let is_local_storage = matches!(
        map_type,
        MapType::SkStorage | MapType::InodeStorage | MapType::TaskStorage | MapType::CgrpStorage
    );

    // SAFETY: all-zero byte-pattern valid for `bpf_attr`
    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
    // SAFETY: union access
    let attr_map = unsafe { &mut attr.__bindgen_anon_1 };

    // Sizes chosen so each type passes its `map_alloc_check`/`map_alloc`.
    attr_map.key_size = match map_type {
        MapType::LpmTrie | MapType::CgroupStorage | MapType::PerCpuCgroupStorage => 16,
        MapType::Queue
        | MapType::Stack
        | MapType::RingBuf
        | MapType::BloomFilter
        | MapType::UserRingBuf
        | MapType::Arena => 0,
        _ => 4,
    };
    attr_map.value_size = if is_local_storage {
        // Intentionally oversized; see `is_local_storage` above.
        u32::MAX
    } else {
        match map_type {
            MapType::StackTrace | MapType::LpmTrie => 8,
            MapType::RingBuf | MapType::UserRingBuf | MapType::Arena => 0,
            _ => 4,
        }
    };
    attr_map.max_entries = match map_type {
        _ if is_local_storage => 0,
        MapType::CgroupStorage | MapType::PerCpuCgroupStorage => 0,
        MapType::RingBuf | MapType::UserRingBuf => page_size() as u32,
        _ => 1,
    };

    // For map-in-map types, create a placeholder inner map. The underscore
    // binding keeps its fd alive until after the `BPF_MAP_CREATE` call below.
    let _inner_map = if matches!(map_type, MapType::ArrayOfMaps | MapType::HashOfMaps) {
        let fd = dummy_map()?;
        attr_map.inner_map_fd = fd.as_raw_fd() as u32;
        Some(fd)
    } else {
        None
    };

    // Remaining per-type flags and BTF ids.
    match map_type {
        MapType::LpmTrie => attr_map.map_flags = BPF_F_NO_PREALLOC,
        _ if is_local_storage => {
            attr_map.map_flags = BPF_F_NO_PREALLOC;
            attr_map.btf_key_type_id = 1;
            attr_map.btf_value_type_id = 1;
        }
        MapType::StructOps => attr_map.btf_vmlinux_value_type_id = 1,
        MapType::Arena => attr_map.map_flags = BPF_F_MMAPABLE,
        _ => {}
    }
    attr_map.map_type = map_type as u32;

    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
    let io_error = match unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) } {
        Ok(_fd) => return Ok(true),
        Err((_, io_error)) => io_error,
    };
    match io_error.raw_os_error() {
        Some(EINVAL) => Ok(false),
        // Expected failure mode for local storage maps; see above.
        Some(E2BIG) if is_local_storage => Ok(true),
        // `ENOTSUPP` from `bpf_struct_ops_map_alloc()` for struct_ops
        Some(524) if matches!(map_type, MapType::StructOps) => Ok(true),
        _ => Err(SyscallError {
            call: "bpf_map_create",
            io_error,
        }),
    }
}
/// Create a map and return its fd.
///
/// Used as a placeholder inner map when probing map-in-map types
/// (`map_type = 1` is a plain hash map).
fn dummy_map() -> Result<crate::MockableFd, SyscallError> {
    // SAFETY: all-zero byte-pattern valid for `bpf_attr`
    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
    {
        // SAFETY: union access
        let fields = unsafe { &mut attr.__bindgen_anon_1 };
        fields.map_type = 1;
        fields.key_size = 1;
        fields.value_size = 1;
        fields.max_entries = 1;
    }
    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
    let result = unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) };
    result.map_err(|(_, io_error)| SyscallError {
        call: "bpf_map_create",
        io_error,
    })
}

@ -1,7 +1,9 @@
//! Test feature probing against kernel version. //! Test feature probing against kernel version.
use assert_matches::assert_matches; use assert_matches::assert_matches;
use aya::{programs::ProgramType, sys::feature_probe::*, util::KernelVersion}; use aya::{maps::MapType, programs::ProgramType, sys::feature_probe::*, util::KernelVersion};
use super::load::{MAX_RETRIES, RETRY_DURATION};
// TODO: Enable certain CONFIG_* options when compiling the image for VM tests. // TODO: Enable certain CONFIG_* options when compiling the image for VM tests.
#[test] #[test]
@ -193,14 +195,211 @@ fn probe_supported_programs() {
} }
} }
#[test]
fn probe_supported_maps() {
    let current = KernelVersion::current().unwrap();
    let v = KernelVersion::new;

    // One entry per probed map type:
    // (label for failure messages, map type, kernel version that introduced
    // it, retry budget). A budget of 1 is a single attempt; probes that have
    // proven flaky (back-to-back calls can spuriously fail, see `retry`) get
    // a larger budget.
    let cases: &[(&str, MapType, KernelVersion, usize)] = &[
        ("hash", MapType::Hash, v(3, 19, 0), 1),
        ("array", MapType::Array, v(3, 19, 0), 1),
        ("prog_array", MapType::ProgramArray, v(4, 2, 0), 3),
        ("perf_event_array", MapType::PerfEventArray, v(4, 3, 0), 3),
        ("per_cpu_hash", MapType::PerCpuHash, v(4, 6, 0), 3),
        ("per_cpu_array", MapType::PerCpuArray, v(4, 6, 0), 3),
        ("stack_trace", MapType::StackTrace, v(4, 6, 0), 3),
        ("cgroup_array", MapType::CgroupArray, v(4, 8, 0), 1),
        ("lru_hash", MapType::LruHash, v(4, 10, 0), 3),
        ("lru_per_cpu_hash", MapType::LruPerCpuHash, v(4, 10, 0), 3),
        ("lpm_trie", MapType::LpmTrie, v(4, 11, 0), 3),
        ("array_of_maps", MapType::ArrayOfMaps, v(4, 12, 0), 3),
        ("hash_of_maps", MapType::HashOfMaps, v(4, 12, 0), 3),
        ("dev_map", MapType::DevMap, v(4, 14, 0), MAX_RETRIES),
        ("sock_map", MapType::SockMap, v(4, 14, 0), 3),
        ("cpu_map", MapType::CpuMap, v(4, 15, 0), 3),
        ("xsk_map", MapType::XskMap, v(4, 18, 0), 3),
        ("sock_hash", MapType::SockHash, v(4, 18, 0), 3),
        ("cgroup_storage", MapType::CgroupStorage, v(4, 19, 0), 3),
        ("reuseport_sock_array", MapType::ReuseportSockArray, v(4, 19, 0), 3),
        ("per_cpu_cgroup_storage", MapType::PerCpuCgroupStorage, v(4, 20, 0), 3),
        ("queue", MapType::Queue, v(4, 20, 0), 3),
        ("stack", MapType::Stack, v(4, 20, 0), 3),
        ("sk_storage", MapType::SkStorage, v(5, 2, 0), 3),
        ("devmap_hash", MapType::DevMapHash, v(5, 4, 0), MAX_RETRIES),
        ("struct_ops", MapType::StructOps, v(5, 6, 0), 3),
        ("ring_buf", MapType::RingBuf, v(5, 8, 0), MAX_RETRIES),
        // `InodeStorage` (5.10) is not probed: it requires `CONFIG_BPF_LSM=y`,
        // which is not enabled in the VM test image.
        ("task_storage", MapType::TaskStorage, v(5, 11, 0), 3),
        ("bloom_filter", MapType::BloomFilter, v(5, 16, 0), 3),
        ("user_ring_buf", MapType::UserRingBuf, v(6, 1, 0), 3),
        ("cgrp_storage", MapType::CgrpStorage, v(6, 2, 0), 3),
        ("arena", MapType::Arena, v(6, 9, 0), 3),
    ];

    for (name, map_type, since, retries) in cases {
        // A map type is expected to be supported exactly when the running
        // kernel is at least the version that introduced it.
        let expected = current >= *since;
        match retry(*retries, || is_map_supported(*map_type)) {
            Ok(supported) => assert_eq!(
                supported, expected,
                "{name}: expected is_map_supported == {expected}"
            ),
            Err(err) => panic!("{name}: unexpected probe error: {err:?}"),
        }
    }
}
// Back-to-back calls can be flaky and return `EPERM`. // Back-to-back calls can be flaky and return `EPERM`.
fn retry<T, E>(max_retries: u64, try_func: impl Fn() -> Result<T, E>) -> Result<T, E> { fn retry<T, E>(max_retries: usize, try_func: impl Fn() -> Result<T, E>) -> Result<T, E> {
let mut res = try_func(); let mut res = try_func();
for i in 1..max_retries { for i in 1..(max_retries as u32) {
if res.is_ok() { if res.is_ok() {
return res; return res;
} }
std::thread::sleep(std::time::Duration::from_millis(i * 10)); std::thread::sleep(RETRY_DURATION * i);
res = try_func(); res = try_func();
} }
res res

@ -12,8 +12,8 @@ use aya::{
use aya_obj::programs::XdpAttachType; use aya_obj::programs::XdpAttachType;
use test_log::test; use test_log::test;
const MAX_RETRIES: usize = 100; pub(crate) const MAX_RETRIES: usize = 100;
const RETRY_DURATION: Duration = Duration::from_millis(10); pub(crate) const RETRY_DURATION: Duration = Duration::from_millis(10);
#[test] #[test]
fn long_name() { fn long_name() {

@ -9006,6 +9006,7 @@ pub fn aya::programs::tc::SchedClassifier::fd(&self) -> core::result::Result<std
pub fn aya::programs::loaded_programs() -> impl core::iter::traits::iterator::Iterator<Item = core::result::Result<aya::programs::ProgramInfo, aya::programs::ProgramError>> pub fn aya::programs::loaded_programs() -> impl core::iter::traits::iterator::Iterator<Item = core::result::Result<aya::programs::ProgramInfo, aya::programs::ProgramError>>
pub mod aya::sys pub mod aya::sys
pub mod aya::sys::feature_probe pub mod aya::sys::feature_probe
pub fn aya::sys::feature_probe::is_map_supported(map_type: aya::maps::MapType) -> core::result::Result<bool, aya::sys::SyscallError>
pub fn aya::sys::feature_probe::is_program_supported(program_type: aya::programs::ProgramType) -> core::result::Result<bool, aya::sys::SyscallError> pub fn aya::sys::feature_probe::is_program_supported(program_type: aya::programs::ProgramType) -> core::result::Result<bool, aya::sys::SyscallError>
#[non_exhaustive] pub enum aya::sys::Stats #[non_exhaustive] pub enum aya::sys::Stats
pub aya::sys::Stats::RunTime pub aya::sys::Stats::RunTime

Loading…
Cancel
Save