aya,aya-obj: add program type feature probing

Adds an API that probes whether the kernel supports a given program type.

Additionally adds `const fn bpf_insn::new()` so that common instruction
sequences can be built as constants instead of being recomputed through
`copy_instructions()`.

Assertions for `LircMode2` and `Lsm` are disabled because they require
certain kernel config options (`CONFIG_BPF_LIRC_MODE2`, `CONFIG_BPF_LSM`)
that are not enabled by default in the VM test images.
pull/1063/head
Tyrone Wu
parent 6c3b2a6b9c
commit 359b3eafe1

@@ -23,8 +23,8 @@ use crate::{
        Array, Btf, BtfError, BtfExt, BtfFeatures, BtfType, DataSecEntry, FuncSecInfo, LineSecInfo,
    },
    generated::{
-       bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_CALL, BPF_F_RDONLY_PROG,
-       BPF_JMP, BPF_K,
+       __BindgenBitfieldUnit, bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_CALL,
+       BPF_F_RDONLY_PROG, BPF_JMP, BPF_K,
    },
    maps::{bpf_map_def, BtfMap, BtfMapDef, LegacyMap, Map, PinningType, MINIMUM_MAP_SIZE},
    programs::{
@@ -135,6 +135,42 @@ impl Features {
    }
}
impl bpf_insn {
    /// Creates a [BPF instruction](bpf_insn).
    ///
    /// The arguments will be converted to the host's endianness.
    pub const fn new(code: u8, dst_reg: u8, src_reg: u8, off: i16, imm: i32) -> Self {
        if dst_reg > 10 || src_reg > 10 {
            panic!("invalid register number");
        }

        let registers;
        let offset;
        let immediate;
        #[cfg(target_endian = "little")]
        {
            registers = (src_reg << 4) | dst_reg;
            offset = off.swap_bytes();
            immediate = imm.swap_bytes();
        }
        #[cfg(target_endian = "big")]
        {
            registers = (dst_reg << 4) | src_reg;
            offset = off;
            immediate = imm;
        }

        bpf_insn {
            code,
            _bitfield_align_1: [],
            _bitfield_1: __BindgenBitfieldUnit::new([registers]),
            off: offset,
            imm: immediate,
        }
    }
}
/// The loaded object file representation
#[derive(Clone, Debug)]
pub struct Object {
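
For reference, a minimal sketch (not part of the diff) of how the new constructor is meant to be used: the feature probe below builds its two-instruction "return 0" program as a constant, where opcode 0xb7 is BPF_ALU64 | BPF_MOV | BPF_K and 0x95 is BPF_JMP | BPF_EXIT. The constant name here is illustrative, mirroring RETURN_ZERO_INSNS in the new feature_probe module:

    use aya_obj::generated::bpf_insn;

    // Illustrative constant: `mov64 r0, 0` followed by `exit`,
    // evaluated at compile time thanks to `const fn bpf_insn::new`.
    const RETURN_ZERO: [bpf_insn; 2] = [
        bpf_insn::new(0xb7, 0, 0, 0, 0), // BPF_ALU64 | BPF_MOV | BPF_K: r0 = 0
        bpf_insn::new(0x95, 0, 0, 0, 0), // BPF_JMP | BPF_EXIT: return r0
    ];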

@@ -1139,7 +1139,7 @@ pub(crate) fn is_btf_type_tag_supported() -> bool {
    bpf_load_btf(btf_bytes.as_slice(), &mut [], Default::default()).is_ok()
}

-fn bpf_prog_load(attr: &mut bpf_attr) -> SysResult<crate::MockableFd> {
+pub(super) fn bpf_prog_load(attr: &mut bpf_attr) -> SysResult<crate::MockableFd> {
    // SAFETY: BPF_PROG_LOAD returns a new file descriptor.
    unsafe { fd_sys_bpf(bpf_cmd::BPF_PROG_LOAD, attr) }
}

@@ -0,0 +1,109 @@
//! Probes and identifies available eBPF features supported by the host kernel.

use std::mem;

use aya_obj::generated::{bpf_attach_type, bpf_attr, bpf_insn, BPF_F_SLEEPABLE};
use libc::{E2BIG, EINVAL};

use super::{bpf_prog_load, SyscallError};
use crate::{programs::ProgramType, util::KernelVersion};

const RETURN_ZERO_INSNS: &[bpf_insn] = &[
    bpf_insn::new(0xb7, 0, 0, 0, 0), // mov64 r0 = 0
    bpf_insn::new(0x95, 0, 0, 0, 0), // exit
];
const GPL_COMPATIBLE: &[u8; 4] = b"GPL\0";

/// Whether the host kernel supports the [`ProgramType`].
///
/// # Examples
///
/// ```no_run
/// # use aya::{
/// #     programs::ProgramType,
/// #     sys::feature_probe::is_program_supported,
/// # };
/// #
/// match is_program_supported(ProgramType::Xdp) {
///     Ok(true) => println!("XDP supported :)"),
///     Ok(false) => println!("XDP not supported :("),
///     Err(err) => println!("Uh oh! Unexpected error: {:?}", err),
/// }
/// ```
///
/// # Errors
///
/// Returns [`SyscallError`] if kernel probing fails with an unexpected error.
///
/// Note that certain errors are expected and handled internally; only
/// unanticipated failures during probing will result in this error.
pub fn is_program_supported(program_type: ProgramType) -> Result<bool, SyscallError> {
    if program_type == ProgramType::Unspecified {
        return Ok(false);
    }

    // SAFETY: all-zero byte-pattern valid for `bpf_attr`
    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
    // SAFETY: union access
    let u = unsafe { &mut attr.__bindgen_anon_3 };
    // `bpf_prog_load_fixup_attach_type()` sets this for us for cgroup_sock
    // and sk_reuseport.
    let expected_attach_type = match program_type {
        ProgramType::CgroupSkb => Some(bpf_attach_type::BPF_CGROUP_INET_INGRESS),
        ProgramType::CgroupSockAddr => Some(bpf_attach_type::BPF_CGROUP_INET4_BIND),
        ProgramType::CgroupSockopt => Some(bpf_attach_type::BPF_CGROUP_GETSOCKOPT),
        ProgramType::SkLookup => Some(bpf_attach_type::BPF_SK_LOOKUP),
        ProgramType::Netfilter => Some(bpf_attach_type::BPF_NETFILTER),
        _ => None,
    };

    // Intentionally trigger `EINVAL` for some prog types, and use verifier
    // logs to help confirm whether the variant actually exists.
    let mut verifier_log = [0_u8; libc::PATH_MAX as usize];

    match program_type {
        ProgramType::KProbe => u.kern_version = KernelVersion::current().unwrap().code(),
        ProgramType::Tracing | ProgramType::Extension | ProgramType::Lsm => {
            u.log_buf = verifier_log.as_mut_ptr() as _;
            u.log_size = libc::PATH_MAX as _;
            u.log_level = 1;
        }
        ProgramType::Syscall => u.prog_flags = BPF_F_SLEEPABLE,
        _ => {}
    }

    u.prog_type = program_type as u32;
    u.insn_cnt = 2;
    u.insns = RETURN_ZERO_INSNS.as_ptr() as u64;
    u.license = GPL_COMPATIBLE.as_ptr() as u64;
    if let Some(expected_attach_type) = expected_attach_type {
        u.expected_attach_type = expected_attach_type as u32;
    }

    let io_error = match bpf_prog_load(&mut attr) {
        Ok(_) => return Ok(true),
        Err((_, io_error)) => io_error,
    };

    match io_error.raw_os_error() {
        Some(EINVAL) => {
            // verifier/`bpf_check_attach_target()` produces same log message
            // for these types (due to `attach_btf_id` unset)
            let supported = matches!(
                program_type, ProgramType::Tracing | ProgramType::Extension | ProgramType::Lsm
                    if verifier_log.starts_with(b"Tracing programs must provide btf_id")
            );
            Ok(supported)
        }
        // `E2BIG` when accessing/using fields that are not available
        // e.g. `expected_attach_type`
        Some(E2BIG) => Ok(false),
        // `ENOTSUPP` from verifier/`check_struct_ops_btf_id()` for struct_ops
        Some(524) if program_type == ProgramType::StructOps => Ok(true),
        _ => Err(SyscallError {
            call: "bpf_prog_load",
            io_error,
        }),
    }
}
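
A minimal usage sketch of the new probe (illustrative, not part of the diff; assumes `ProgramType` implements `Debug`, and the helper name is hypothetical):

    use aya::{programs::ProgramType, sys::feature_probe::is_program_supported};

    // Hypothetical helper: report which of a few program types the running kernel accepts.
    fn report_support() {
        for ty in [ProgramType::KProbe, ProgramType::Xdp, ProgramType::Tracing] {
            let name = format!("{ty:?}");
            match is_program_supported(ty) {
                Ok(true) => println!("{name}: supported"),
                Ok(false) => println!("{name}: not supported"),
                Err(err) => eprintln!("{name}: probe failed: {err:?}"),
            }
        }
    }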

@@ -1,6 +1,7 @@
//! A collection of system calls for performing eBPF related operations.
mod bpf;
+pub mod feature_probe;
mod netlink;
mod perf_event;

@@ -1,6 +1,7 @@
mod bpf_probe_read;
mod btf_relocations;
mod elf;
+mod feature_probe;
mod info;
mod load;
mod log;

@@ -0,0 +1,207 @@
//! Test feature probing against kernel version.

use assert_matches::assert_matches;
use aya::{programs::ProgramType, sys::feature_probe::*, util::KernelVersion};

// TODO: Enable certain CONFIG_* options when compiling the image for VM tests.
#[test]
fn probe_supported_programs() {
    let current = KernelVersion::current().unwrap();

    let socket_filter = retry(3, || is_program_supported(ProgramType::SocketFilter));
    if current >= KernelVersion::new(3, 19, 0) {
        assert_matches!(socket_filter, Ok(true));
    } else {
        assert_matches!(socket_filter, Ok(false));
    }

    let kprobe = retry(3, || is_program_supported(ProgramType::KProbe));
    let sched_cls = retry(3, || is_program_supported(ProgramType::SchedClassifier));
    let sched_act = retry(3, || is_program_supported(ProgramType::SchedAction));
    if current >= KernelVersion::new(4, 1, 0) {
        assert_matches!(kprobe, Ok(true));
        assert_matches!(sched_cls, Ok(true));
        assert_matches!(sched_act, Ok(true));
    } else {
        assert_matches!(kprobe, Ok(false));
        assert_matches!(sched_cls, Ok(false));
        assert_matches!(sched_act, Ok(false));
    }

    let tracepoint = retry(3, || is_program_supported(ProgramType::TracePoint));
    if current >= KernelVersion::new(4, 7, 0) {
        assert_matches!(tracepoint, Ok(true));
    } else {
        assert_matches!(tracepoint, Ok(false));
    }

    let xdp = retry(3, || is_program_supported(ProgramType::Xdp));
    if current >= KernelVersion::new(4, 8, 0) {
        assert_matches!(xdp, Ok(true));
    } else {
        assert_matches!(xdp, Ok(false));
    }

    let perf_event = retry(3, || is_program_supported(ProgramType::PerfEvent));
    if current >= KernelVersion::new(4, 9, 0) {
        assert_matches!(perf_event, Ok(true));
    } else {
        assert_matches!(perf_event, Ok(false));
    }

    let cgroup_skb = retry(3, || is_program_supported(ProgramType::CgroupSkb));
    let cgroup_sock = retry(3, || is_program_supported(ProgramType::CgroupSock));
    let lwt_in = retry(3, || is_program_supported(ProgramType::LwtInput));
    let lwt_out = retry(3, || is_program_supported(ProgramType::LwtOutput));
    let lwt_xmit = retry(3, || is_program_supported(ProgramType::LwtXmit));
    if current >= KernelVersion::new(4, 10, 0) {
        assert_matches!(cgroup_skb, Ok(true));
        assert_matches!(cgroup_sock, Ok(true));
        assert_matches!(lwt_in, Ok(true));
        assert_matches!(lwt_out, Ok(true));
        assert_matches!(lwt_xmit, Ok(true));
    } else {
        assert_matches!(cgroup_skb, Ok(false));
        assert_matches!(cgroup_sock, Ok(false));
        assert_matches!(lwt_in, Ok(false));
        assert_matches!(lwt_out, Ok(false));
        assert_matches!(lwt_xmit, Ok(false));
    }

    let sock_ops = retry(3, || is_program_supported(ProgramType::SockOps));
    if current >= KernelVersion::new(4, 13, 0) {
        assert_matches!(sock_ops, Ok(true));
    } else {
        assert_matches!(sock_ops, Ok(false));
    }

    let sk_skb = retry(3, || is_program_supported(ProgramType::SkSkb));
    if current >= KernelVersion::new(4, 14, 0) {
        assert_matches!(sk_skb, Ok(true));
    } else {
        assert_matches!(sk_skb, Ok(false));
    }

    let cgroup_device = retry(3, || is_program_supported(ProgramType::CgroupDevice));
    if current >= KernelVersion::new(4, 15, 0) {
        assert_matches!(cgroup_device, Ok(true));
    } else {
        assert_matches!(cgroup_device, Ok(false));
    }

    let sk_msg = retry(3, || is_program_supported(ProgramType::SkMsg));
    let raw_tp = retry(3, || is_program_supported(ProgramType::RawTracePoint));
    let cgroup_sock_addr = retry(3, || is_program_supported(ProgramType::CgroupSockAddr));
    if current >= KernelVersion::new(4, 17, 0) {
        assert_matches!(sk_msg, Ok(true));
        assert_matches!(raw_tp, Ok(true));
        assert_matches!(cgroup_sock_addr, Ok(true));
    } else {
        assert_matches!(sk_msg, Ok(false));
        assert_matches!(raw_tp, Ok(false));
        assert_matches!(cgroup_sock_addr, Ok(false));
    }

    let lwt_seg6local = retry(3, || is_program_supported(ProgramType::LwtSeg6local));
    // Requires `CONFIG_BPF_LIRC_MODE2=y`.
    // let lirc_mode2 = is_program_supported(ProgramType::LircMode2);
    if current >= KernelVersion::new(4, 18, 0) {
        assert_matches!(lwt_seg6local, Ok(true));
        // assert_matches!(lirc_mode2, Ok(true));
    } else {
        assert_matches!(lwt_seg6local, Ok(false));
        // assert_matches!(lirc_mode2, Ok(false));
    }

    let sk_reuseport = retry(3, || is_program_supported(ProgramType::SkReuseport));
    if current >= KernelVersion::new(4, 19, 0) {
        assert_matches!(sk_reuseport, Ok(true));
    } else {
        assert_matches!(sk_reuseport, Ok(false));
    }

    let flow_dissector = retry(3, || is_program_supported(ProgramType::FlowDissector));
    if current >= KernelVersion::new(4, 20, 0) {
        assert_matches!(flow_dissector, Ok(true));
    } else {
        assert_matches!(flow_dissector, Ok(false));
    }

    let cgroup_sysctl = retry(3, || is_program_supported(ProgramType::CgroupSysctl));
    let raw_tp_writable = retry(3, || {
        is_program_supported(ProgramType::RawTracePointWritable)
    });
    if current >= KernelVersion::new(5, 2, 0) {
        assert_matches!(cgroup_sysctl, Ok(true));
        assert_matches!(raw_tp_writable, Ok(true));
    } else {
        assert_matches!(cgroup_sysctl, Ok(false));
        assert_matches!(raw_tp_writable, Ok(false));
    }

    let cgroup_sockopt = retry(3, || is_program_supported(ProgramType::CgroupSockopt));
    if current >= KernelVersion::new(5, 3, 0) {
        assert_matches!(cgroup_sockopt, Ok(true));
    } else {
        assert_matches!(cgroup_sockopt, Ok(false));
    }

    let tracing = retry(3, || is_program_supported(ProgramType::Tracing));
    if current >= KernelVersion::new(5, 5, 0) {
        assert_matches!(tracing, Ok(true));
    } else {
        assert_matches!(tracing, Ok(false));
    }

    let struct_ops = retry(3, || is_program_supported(ProgramType::StructOps));
    let extension = retry(3, || is_program_supported(ProgramType::Extension));
    if current >= KernelVersion::new(5, 6, 0) {
        assert_matches!(struct_ops, Ok(true));
        assert_matches!(extension, Ok(true));
    } else {
        assert_matches!(struct_ops, Ok(false));
        assert_matches!(extension, Ok(false));
    }

    // Requires `CONFIG_BPF_LSM=y`
    // let lsm = retry(3, || is_program_supported(ProgramType::Lsm));
    // if current >= KernelVersion::new(5, 7, 0) {
    //     assert_matches!(lsm, Ok(true));
    // } else {
    //     assert_matches!(lsm, Ok(false));
    // }

    let sk_lookup = retry(3, || is_program_supported(ProgramType::SkLookup));
    if current >= KernelVersion::new(5, 9, 0) {
        assert_matches!(sk_lookup, Ok(true));
    } else {
        assert_matches!(sk_lookup, Ok(false));
    }

    let syscall = retry(3, || is_program_supported(ProgramType::Syscall));
    if current >= KernelVersion::new(5, 14, 0) {
        assert_matches!(syscall, Ok(true));
    } else {
        assert_matches!(syscall, Ok(false));
    }

    let netfilter = retry(3, || is_program_supported(ProgramType::Netfilter));
    if current >= KernelVersion::new(6, 4, 0) {
        assert_matches!(netfilter, Ok(true));
    } else {
        assert_matches!(netfilter, Ok(false));
    }
}

// Back-to-back calls can be flaky and return `EPERM`.
fn retry<T, E>(max_retries: u64, try_func: impl Fn() -> Result<T, E>) -> Result<T, E> {
    let mut res = try_func();
    for i in 1..max_retries {
        if res.is_ok() {
            return res;
        }
        std::thread::sleep(std::time::Duration::from_millis(i * 10));
        res = try_func();
    }
    res
}

@@ -4772,6 +4772,8 @@ pub fn aya_obj::generated::bpf_insn::new_bitfield_1(dst_reg: aya_obj::generated:
pub fn aya_obj::generated::bpf_insn::set_dst_reg(&mut self, val: aya_obj::generated::__u8)
pub fn aya_obj::generated::bpf_insn::set_src_reg(&mut self, val: aya_obj::generated::__u8)
pub fn aya_obj::generated::bpf_insn::src_reg(&self) -> aya_obj::generated::__u8
+impl aya_obj::generated::bpf_insn
+pub const fn aya_obj::generated::bpf_insn::new(code: u8, dst_reg: u8, src_reg: u8, off: i16, imm: i32) -> Self
impl core::clone::Clone for aya_obj::generated::bpf_insn
pub fn aya_obj::generated::bpf_insn::clone(&self) -> aya_obj::generated::bpf_insn
impl core::fmt::Debug for aya_obj::generated::bpf_insn

@@ -9005,6 +9005,8 @@ impl aya::programs::MultiProgram for aya::programs::tc::SchedClassifier
pub fn aya::programs::tc::SchedClassifier::fd(&self) -> core::result::Result<std::os::fd::owned::BorrowedFd<'_>, aya::programs::ProgramError>
pub fn aya::programs::loaded_programs() -> impl core::iter::traits::iterator::Iterator<Item = core::result::Result<aya::programs::ProgramInfo, aya::programs::ProgramError>>
pub mod aya::sys
+pub mod aya::sys::feature_probe
+pub fn aya::sys::feature_probe::is_program_supported(program_type: aya::programs::ProgramType) -> core::result::Result<bool, aya::sys::SyscallError>
#[non_exhaustive] pub enum aya::sys::Stats
pub aya::sys::Stats::RunTime
impl core::clone::Clone for aya::sys::Stats
