From e5e5bfd0747fdb34e519506df7e5808ef9afba67 Mon Sep 17 00:00:00 2001
From: Tyrone Wu
Date: Mon, 28 Oct 2024 02:09:27 +0000
Subject: [PATCH] aya: add feature probing for map type

Add an API that probes whether the kernel supports a given map type.
---
 aya/src/sys/bpf.rs                  |   5 +-
 aya/src/sys/feature_probe.rs        | 159 +++++++++++++-
 .../src/tests/feature_probe.rs      | 198 +++++++++++++++++-
 xtask/public-api/aya.txt            |   1 +
 4 files changed, 357 insertions(+), 6 deletions(-)

diff --git a/aya/src/sys/bpf.rs b/aya/src/sys/bpf.rs
index a52164ed..f27c32a0 100644
--- a/aya/src/sys/bpf.rs
+++ b/aya/src/sys/bpf.rs
@@ -681,7 +681,10 @@ pub(crate) fn bpf_load_btf(
 }
 
 // SAFETY: only use for bpf_cmd that return a new file descriptor on success.
-unsafe fn fd_sys_bpf(cmd: bpf_cmd, attr: &mut bpf_attr) -> io::Result<crate::MockableFd> {
+pub(super) unsafe fn fd_sys_bpf(
+    cmd: bpf_cmd,
+    attr: &mut bpf_attr,
+) -> io::Result<crate::MockableFd> {
     let fd = sys_bpf(cmd, attr)?;
     let fd = fd.try_into().map_err(|std::num::TryFromIntError { .. }| {
         io::Error::new(
diff --git a/aya/src/sys/feature_probe.rs b/aya/src/sys/feature_probe.rs
index e60ecd28..c710a349 100644
--- a/aya/src/sys/feature_probe.rs
+++ b/aya/src/sys/feature_probe.rs
@@ -1,10 +1,19 @@
 //! Probes and identifies available eBPF features supported by the host kernel.
 
-use aya_obj::btf::{Btf, BtfKind};
-use libc::{E2BIG, EINVAL};
+use std::{mem, os::fd::AsRawFd as _};
 
-use super::{SyscallError, bpf_prog_load, with_trivial_prog};
-use crate::programs::{ProgramError, ProgramType};
+use aya_obj::{
+    btf::{Btf, BtfKind},
+    generated::{BPF_F_MMAPABLE, BPF_F_NO_PREALLOC, bpf_attr, bpf_cmd, bpf_map_type},
+};
+use libc::{E2BIG, EBADF, EINVAL};
+
+use super::{SyscallError, bpf_prog_load, fd_sys_bpf, with_trivial_prog};
+use crate::{
+    maps::MapType,
+    programs::{ProgramError, ProgramType},
+    util::page_size,
+};
 
 /// Whether the host kernel supports the [`ProgramType`].
 ///
@@ -96,3 +105,145 @@ pub fn is_program_supported(program_type: ProgramType) -> Result<bool, ProgramError> {
         Err(error) => Err(error),
     }
 }
+
+/// Whether the host kernel supports the [`MapType`].
+///
+/// # Examples
+///
+/// ```no_run
+/// # use aya::{
+/// #     maps::MapType,
+/// #     sys::feature_probe::is_map_supported,
+/// # };
+/// #
+/// match is_map_supported(MapType::HashOfMaps) {
+///     Ok(true) => println!("hash_of_maps supported :)"),
+///     Ok(false) => println!("hash_of_maps not supported :("),
+///     Err(err) => println!("Uh oh! Unexpected error: {:?}", err),
+/// }
+/// ```
+///
+/// # Errors
+///
+/// Returns [`SyscallError`] if kernel probing fails with an unexpected error.
+///
+/// Note that certain errors are expected and handled internally; only
+/// unanticipated failures during probing will result in this error.
+pub fn is_map_supported(map_type: MapType) -> Result<bool, SyscallError> {
+    if map_type == MapType::Unspecified {
+        return Ok(false);
+    }
+
+    // SAFETY: all-zero byte-pattern valid for `bpf_attr`
+    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
+    // SAFETY: union access
+    let u = unsafe { &mut attr.__bindgen_anon_1 };
+
+    // To pass `map_alloc_check`/`map_alloc`
+    let key_size = match map_type {
+        MapType::LpmTrie | MapType::CgroupStorage | MapType::PerCpuCgroupStorage => 16,
+        MapType::Queue
+        | MapType::Stack
+        | MapType::RingBuf
+        | MapType::BloomFilter
+        | MapType::UserRingBuf
+        | MapType::Arena => 0,
+        _ => 4,
+    };
+    let value_size = match map_type {
+        MapType::StackTrace | MapType::LpmTrie => 8,
+        MapType::RingBuf | MapType::UserRingBuf | MapType::Arena => 0,
+        _ => 4,
+    };
+    let max_entries = match map_type {
+        MapType::CgroupStorage
+        | MapType::PerCpuCgroupStorage
+        | MapType::SkStorage
+        | MapType::InodeStorage
+        | MapType::TaskStorage
+        | MapType::CgrpStorage => 0,
+        MapType::RingBuf | MapType::UserRingBuf => page_size() as u32,
+        _ => 1,
+    };
+
+    // Ensure that fd doesn't get dropped due to scoping.
+    let inner_map_fd;
+    match map_type {
+        MapType::LpmTrie => u.map_flags = BPF_F_NO_PREALLOC,
+        MapType::SkStorage
+        | MapType::InodeStorage
+        | MapType::TaskStorage
+        | MapType::CgrpStorage => {
+            u.map_flags = BPF_F_NO_PREALLOC;
+            // Intentionally trigger `EBADF` from `btf_get_by_fd()`.
+            u.btf_fd = u32::MAX;
+            u.btf_key_type_id = 1;
+            u.btf_value_type_id = 1;
+        }
+        MapType::ArrayOfMaps | MapType::HashOfMaps => {
+            // SAFETY: all-zero byte-pattern valid for `bpf_attr`
+            let mut attr_map = unsafe { mem::zeroed::<bpf_attr>() };
+            // SAFETY: union access
+            let u_map = unsafe { &mut attr_map.__bindgen_anon_1 };
+            u_map.map_type = bpf_map_type::BPF_MAP_TYPE_HASH as u32;
+            u_map.key_size = 1;
+            u_map.value_size = 1;
+            u_map.max_entries = 1;
+            // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+            inner_map_fd = unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr_map) }.map_err(
+                |io_error| SyscallError {
+                    call: "bpf_map_create",
+                    io_error,
+                },
+            )?;
+
+            u.inner_map_fd = inner_map_fd.as_raw_fd() as u32;
+        }
+        MapType::StructOps => u.btf_vmlinux_value_type_id = 1,
+        MapType::Arena => u.map_flags = BPF_F_MMAPABLE,
+        _ => {}
+    }
+
+    u.map_type = map_type as u32;
+    u.key_size = key_size;
+    u.value_size = value_size;
+    u.max_entries = max_entries;
+
+    // SAFETY: BPF_MAP_CREATE returns a new file descriptor.
+    let io_error = match unsafe { fd_sys_bpf(bpf_cmd::BPF_MAP_CREATE, &mut attr) } {
+        Ok(_) => return Ok(true),
+        Err(io_error) => io_error,
+    };
+    match io_error.raw_os_error() {
+        Some(EINVAL) => Ok(false),
+        Some(E2BIG)
+            if matches!(
+                map_type,
+                MapType::SkStorage
+                    | MapType::StructOps
+                    | MapType::InodeStorage
+                    | MapType::TaskStorage
+                    | MapType::CgrpStorage
+            ) =>
+        {
+            Ok(false)
+        }
+        Some(EBADF)
+            if matches!(
+                map_type,
+                MapType::SkStorage
+                    | MapType::InodeStorage
+                    | MapType::TaskStorage
+                    | MapType::CgrpStorage
+            ) =>
+        {
+            Ok(true)
+        }
+        // `ENOTSUPP` from `bpf_struct_ops_map_alloc()` for struct_ops.
+        Some(524) if map_type == MapType::StructOps => Ok(true),
+        _ => Err(SyscallError {
+            call: "bpf_map_create",
+            io_error,
+        }),
+    }
+}
diff --git a/test/integration-test/src/tests/feature_probe.rs b/test/integration-test/src/tests/feature_probe.rs
index c1bd0d50..5c57afe3 100644
--- a/test/integration-test/src/tests/feature_probe.rs
+++ b/test/integration-test/src/tests/feature_probe.rs
@@ -1,7 +1,7 @@
 //! Test feature probing against kernel version.
 
 use assert_matches::assert_matches;
-use aya::{Btf, programs::ProgramType, sys::feature_probe::*, util::KernelVersion};
+use aya::{Btf, maps::MapType, programs::ProgramType, sys::feature_probe::*, util::KernelVersion};
 use procfs::kernel_config;
 
 #[test]
@@ -210,3 +210,199 @@ fn probe_supported_programs() {
         assert_matches!(netfilter, Ok(false));
     }
 }
+
+#[test]
+fn probe_supported_maps() {
+    let current = KernelVersion::current().unwrap();
+
+    let hash = is_map_supported(MapType::Hash);
+    let array = is_map_supported(MapType::Array);
+    if current >= KernelVersion::new(3, 19, 0) {
+        assert_matches!(hash, Ok(true));
+        assert_matches!(array, Ok(true));
+    } else {
+        assert_matches!(hash, Ok(false));
+        assert_matches!(array, Ok(false));
+    }
+
+    let prog_array = is_map_supported(MapType::ProgramArray);
+    if current >= KernelVersion::new(4, 2, 0) {
+        assert_matches!(prog_array, Ok(true));
+    } else {
+        assert_matches!(prog_array, Ok(false));
+    }
+
+    let perf_event_array = is_map_supported(MapType::PerfEventArray);
+    if current >= KernelVersion::new(4, 3, 0) {
+        assert_matches!(perf_event_array, Ok(true));
+    } else {
+        assert_matches!(perf_event_array, Ok(false));
+    }
+
+    let per_cpu_hash = is_map_supported(MapType::PerCpuHash);
+    let per_cpu_array = is_map_supported(MapType::PerCpuArray);
+    let stack_trace = is_map_supported(MapType::StackTrace);
+    if current >= KernelVersion::new(4, 6, 0) {
+        assert_matches!(per_cpu_hash, Ok(true));
+        assert_matches!(per_cpu_array, Ok(true));
+        assert_matches!(stack_trace, Ok(true));
+    } else {
+        assert_matches!(per_cpu_hash, Ok(false));
+        assert_matches!(per_cpu_array, Ok(false));
+        assert_matches!(stack_trace, Ok(false));
+    }
+
+    let cgroup_array = is_map_supported(MapType::CgroupArray);
+    if current >= KernelVersion::new(4, 8, 0) {
+        assert_matches!(cgroup_array, Ok(true));
+    } else {
+        assert_matches!(cgroup_array, Ok(false));
+    }
+
+    let lru_hash = is_map_supported(MapType::LruHash);
+    let lru_per_cpu_hash = is_map_supported(MapType::LruPerCpuHash);
+    if current >= KernelVersion::new(4, 10, 0) {
+        assert_matches!(lru_hash, Ok(true));
+        assert_matches!(lru_per_cpu_hash, Ok(true));
+    } else {
+        assert_matches!(lru_hash, Ok(false));
+        assert_matches!(lru_per_cpu_hash, Ok(false));
+    }
+
+    let lpm_trie = is_map_supported(MapType::LpmTrie);
+    if current >= KernelVersion::new(4, 11, 0) {
+        assert_matches!(lpm_trie, Ok(true));
+    } else {
+        assert_matches!(lpm_trie, Ok(false));
+    }
+
+    let array_of_maps = is_map_supported(MapType::ArrayOfMaps);
+    let hash_of_maps = is_map_supported(MapType::HashOfMaps);
+    if current >= KernelVersion::new(4, 12, 0) {
+        assert_matches!(array_of_maps, Ok(true));
+        assert_matches!(hash_of_maps, Ok(true));
+    } else {
+        assert_matches!(array_of_maps, Ok(false));
+        assert_matches!(hash_of_maps, Ok(false));
+    }
+
+    let dev_map = is_map_supported(MapType::DevMap);
+    let sock_map = is_map_supported(MapType::SockMap);
+    if current >= KernelVersion::new(4, 14, 0) {
+        assert_matches!(dev_map, Ok(true));
+        assert_matches!(sock_map, Ok(true));
+    } else {
+        assert_matches!(dev_map, Ok(false));
+        assert_matches!(sock_map, Ok(false));
+    }
+
+    let cpu_map = is_map_supported(MapType::CpuMap);
+    if current >= KernelVersion::new(4, 15, 0) {
+        assert_matches!(cpu_map, Ok(true));
+    } else {
+        assert_matches!(cpu_map, Ok(false));
+    }
+
+    let xsk_map = is_map_supported(MapType::XskMap);
+    let sock_hash = is_map_supported(MapType::SockHash);
+    if current >= KernelVersion::new(4, 18, 0) {
+        assert_matches!(xsk_map, Ok(true));
+        assert_matches!(sock_hash, Ok(true));
+    } else {
+        assert_matches!(xsk_map, Ok(false));
+        assert_matches!(sock_hash, Ok(false));
+    }
+
+    let cgroup_storage = is_map_supported(MapType::CgroupStorage);
+    let reuseport_sock_array = is_map_supported(MapType::ReuseportSockArray);
+    if current >= KernelVersion::new(4, 19, 0) {
+        assert_matches!(cgroup_storage, Ok(true));
+        assert_matches!(reuseport_sock_array, Ok(true));
+    } else {
+        assert_matches!(cgroup_storage, Ok(false));
+        assert_matches!(reuseport_sock_array, Ok(false));
+    }
+
+    let per_cpu_cgroup_storage = is_map_supported(MapType::PerCpuCgroupStorage);
+    let queue = is_map_supported(MapType::Queue);
+    let stack = is_map_supported(MapType::Stack);
+    if current >= KernelVersion::new(4, 20, 0) {
+        assert_matches!(per_cpu_cgroup_storage, Ok(true));
+        assert_matches!(queue, Ok(true));
+        assert_matches!(stack, Ok(true));
+    } else {
+        assert_matches!(per_cpu_cgroup_storage, Ok(false));
+        assert_matches!(queue, Ok(false));
+        assert_matches!(stack, Ok(false));
+    }
+
+    let sk_storage = is_map_supported(MapType::SkStorage);
+    if current >= KernelVersion::new(5, 2, 0) {
+        assert_matches!(sk_storage, Ok(true));
+    } else {
+        assert_matches!(sk_storage, Ok(false));
+    }
+
+    let devmap_hash = is_map_supported(MapType::DevMapHash);
+    if current >= KernelVersion::new(5, 4, 0) {
+        assert_matches!(devmap_hash, Ok(true));
+    } else {
+        assert_matches!(devmap_hash, Ok(false));
+    }
+
+    let struct_ops = is_map_supported(MapType::StructOps);
+    if current >= KernelVersion::new(5, 6, 0) {
+        assert_matches!(struct_ops, Ok(true));
+    } else {
+        assert_matches!(struct_ops, Ok(false));
+    }
+
+    let ring_buf = is_map_supported(MapType::RingBuf);
+    if current >= KernelVersion::new(5, 8, 0) {
+        assert_matches!(ring_buf, Ok(true));
+    } else {
+        assert_matches!(ring_buf, Ok(false));
+    }
+
+    let inode_storage = is_map_supported(MapType::InodeStorage); // Requires `CONFIG_BPF_LSM=y`
+    if current >= KernelVersion::new(5, 10, 0) {
+        assert_matches!(inode_storage, Ok(true));
+    } else {
+        assert_matches!(inode_storage, Ok(false));
+    }
+
+    let task_storage = is_map_supported(MapType::TaskStorage);
+    if current >= KernelVersion::new(5, 11, 0) {
+        assert_matches!(task_storage, Ok(true));
+    } else {
+        assert_matches!(task_storage, Ok(false));
+    }
+
+    let bloom_filter = is_map_supported(MapType::BloomFilter);
+    if current >= KernelVersion::new(5, 16, 0) {
+        assert_matches!(bloom_filter, Ok(true));
+    } else {
+        assert_matches!(bloom_filter, Ok(false));
+    }
+
+    let user_ring_buf = is_map_supported(MapType::UserRingBuf);
+    if current >= KernelVersion::new(6, 1, 0) {
+        assert_matches!(user_ring_buf, Ok(true));
+    } else {
+        assert_matches!(user_ring_buf, Ok(false));
+    }
+
+    let cgrp_storage = is_map_supported(MapType::CgrpStorage);
+    if current >= KernelVersion::new(6, 2, 0) {
+        assert_matches!(cgrp_storage, Ok(true));
+    } else {
+        assert_matches!(cgrp_storage, Ok(false));
+    }
+
+    let arena = is_map_supported(MapType::Arena);
+    if current >= KernelVersion::new(6, 9, 0) {
+        assert_matches!(arena, Ok(true));
+    } else {
+        assert_matches!(arena, Ok(false));
+    }
+}
diff --git a/xtask/public-api/aya.txt b/xtask/public-api/aya.txt
index 92d8ff60..8e2c2238 100644
--- a/xtask/public-api/aya.txt
+++ b/xtask/public-api/aya.txt
@@ -9998,6 +9998,7 @@ pub fn aya::programs::tc::SchedClassifier::fd(&self) -> core::result::Result
 impl core::iter::traits::iterator::Iterator>
 pub mod aya::sys
 pub mod aya::sys::feature_probe
+pub fn aya::sys::feature_probe::is_map_supported(map_type: aya::maps::MapType) -> core::result::Result<bool, aya::sys::SyscallError>
 pub fn aya::sys::feature_probe::is_program_supported(program_type: aya::programs::ProgramType) -> core::result::Result<bool, aya::programs::ProgramError>
 #[non_exhaustive] pub enum aya::sys::Stats
 pub aya::sys::Stats::RunTime
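
A minimal usage sketch, not part of the patch, showing how a consumer crate might combine the new `is_map_supported` with the existing `is_program_supported` to choose data structures at startup; the `main` wiring and the ring-buffer/perf-event-array fallback are illustrative assumptions, not something this patch prescribes.

```rust
use aya::{
    maps::MapType,
    programs::ProgramType,
    sys::feature_probe::{is_map_supported, is_program_supported},
};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Probe once at startup and branch on the results.
    let kprobe_ok = is_program_supported(ProgramType::KProbe)?;
    let ringbuf_ok = is_map_supported(MapType::RingBuf)?;

    if !kprobe_ok {
        println!("kprobe programs unsupported on this kernel; skipping instrumentation");
        return Ok(());
    }

    // Illustrative fallback: prefer a ring buffer (kernel >= 5.8, per the
    // integration test above), otherwise fall back to a perf event array.
    if ringbuf_ok {
        println!("using BPF_MAP_TYPE_RINGBUF for event delivery");
    } else {
        println!("falling back to BPF_MAP_TYPE_PERF_EVENT_ARRAY");
    }

    Ok(())
}
```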