diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 08c94a23..feed9ad4 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -7,3 +7,7 @@ updates: directory: "/" schedule: interval: "weekly" + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" diff --git a/.github/workflows/build-aya-bpf.yml b/.github/workflows/build-aya-bpf.yml index a29d7c09..19193abc 100644 --- a/.github/workflows/build-aya-bpf.yml +++ b/.github/workflows/build-aya-bpf.yml @@ -26,13 +26,12 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: toolchain: nightly components: rust-src - override: true - uses: Swatinem/rust-cache@v1 diff --git a/.github/workflows/build-aya.yml b/.github/workflows/build-aya.yml index 076e3ac5..4e742c85 100644 --- a/.github/workflows/build-aya.yml +++ b/.github/workflows/build-aya.yml @@ -24,12 +24,15 @@ jobs: - riscv64gc-unknown-linux-gnu runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: toolchain: stable - override: true + + - uses: taiki-e/install-action@cargo-hack + - name: Check + run: cargo hack check --all-targets --feature-powerset --ignore-private - uses: Swatinem/rust-cache@v1 - name: Prereqs diff --git a/.github/workflows/gen.yml b/.github/workflows/gen.yml index 15a3e185..c6700dce 100644 --- a/.github/workflows/gen.yml +++ b/.github/workflows/gen.yml @@ -7,9 +7,9 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: libbpf/libbpf path: libbpf @@ -18,12 +18,10 @@ jobs: working-directory: libbpf run: echo "LIBBPF_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: nightly components: rustfmt, clippy - override: true - uses: Swatinem/rust-cache@v1 diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index f06de720..e3e21f81 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -17,8 +17,8 @@ jobs: runs-on: macos-latest steps: - - uses: actions/checkout@v2 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 + - uses: actions/checkout@v3 with: repository: libbpf/libbpf path: libbpf diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index afe47cf0..5c68590f 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -18,14 +18,12 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: nightly components: rustfmt, clippy, miri, rust-src - override: true - name: Check formatting run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5933e1a4..9b00c0d7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -9,7 +9,7 @@ jobs: if: startsWith(github.ref, 'refs/tags/') runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 @@ -18,7 +18,7 @@ jobs: - name: Build Changelog id: github_release - uses: mikepenz/release-changelog-builder-action@v2 + uses: 
mikepenz/release-changelog-builder-action@v3
         with:
           configuration: ".github/changelog-config.json"
         env:
diff --git a/aya-bpf-macros/Cargo.toml b/aya-bpf-macros/Cargo.toml
index a601444d..4c57a5bd 100644
--- a/aya-bpf-macros/Cargo.toml
+++ b/aya-bpf-macros/Cargo.toml
@@ -10,7 +10,7 @@ proc-macro = true
 [dependencies]
 proc-macro2 = "1.0"
 quote = "1.0"
-syn = {version = "1.0", features = ["full"]}
+syn = {version = "2.0", features = ["full"]}
 
 [dev-dependencies]
 aya-bpf = { path = "../bpf/aya-bpf" }
diff --git a/aya-bpf-macros/src/expand.rs b/aya-bpf-macros/src/expand.rs
index 506cfa89..b96db0fe 100644
--- a/aya-bpf-macros/src/expand.rs
+++ b/aya-bpf-macros/src/expand.rs
@@ -212,7 +212,7 @@ impl Xdp {
                 frags = m
             } else {
                 return Err(Error::new_spanned(
-                    "mutlibuffer",
+                    s,
                     "invalid value. should be 'true' or 'false'",
                 ));
             }
@@ -613,18 +613,39 @@ impl RawTracePoint {
 
 pub struct Lsm {
     item: ItemFn,
-    name: String,
+    name: Option<String>,
+    sleepable: bool,
 }
 
 impl Lsm {
     pub fn from_syn(mut args: Args, item: ItemFn) -> Result<Lsm> {
-        let name = name_arg(&mut args)?.unwrap_or_else(|| item.sig.ident.to_string());
-
-        Ok(Lsm { item, name })
+        let name = pop_arg(&mut args, "name");
+        let mut sleepable = false;
+        if let Some(s) = pop_arg(&mut args, "sleepable") {
+            if let Ok(m) = s.parse() {
+                sleepable = m
+            } else {
+                return Err(Error::new_spanned(
+                    s,
+                    "invalid value. should be 'true' or 'false'",
+                ));
+            }
+        }
+        err_on_unknown_args(&args)?;
+        Ok(Lsm {
+            item,
+            name,
+            sleepable,
+        })
     }
 
     pub fn expand(&self) -> Result<TokenStream> {
-        let section_name = format!("lsm/{}", self.name);
+        let section_prefix = if self.sleepable { "lsm.s" } else { "lsm" };
+        let section_name = if let Some(name) = &self.name {
+            format!("{section_prefix}/{name}")
+        } else {
+            section_prefix.to_string()
+        };
         let fn_vis = &self.item.vis;
         let fn_name = &self.item.sig.ident;
         let item = &self.item;
diff --git a/aya-bpf-macros/src/lib.rs b/aya-bpf-macros/src/lib.rs
index 6c3c0699..182a2fcd 100644
--- a/aya-bpf-macros/src/lib.rs
+++ b/aya-bpf-macros/src/lib.rs
@@ -118,6 +118,40 @@ pub fn cgroup_skb(attrs: TokenStream, item: TokenStream) -> TokenStream {
         .into()
 }
 
+/// Marks a function as a [`CgroupSockAddr`] eBPF program.
+///
+/// [`CgroupSockAddr`] programs can be used to inspect or modify socket addresses passed to
+/// various syscalls within a [cgroup]. The `attach_type` argument specifies a place to attach
+/// the eBPF program to. See [`CgroupSockAddrAttachType`] for more details.
+///
+/// [cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+/// [`CgroupSockAddrAttachType`]: ../aya/programs/cgroup_sock_addr/enum.CgroupSockAddrAttachType.html
+/// [`CgroupSockAddr`]: ../aya/programs/cgroup_sock_addr/struct.CgroupSockAddr.html
+///
+/// # Minimum kernel version
+///
+/// The minimum kernel version required to use this feature is 4.17.
+///
+/// # Examples
+///
+/// ```no_run
+/// use aya_bpf::{macros::cgroup_sock_addr, programs::SockAddrContext};
+///
+/// #[cgroup_sock_addr(connect4)]
+/// pub fn connect4(ctx: SockAddrContext) -> i32 {
+///     match try_connect4(ctx) {
+///         Ok(ret) => ret,
+///         Err(ret) => match ret.try_into() {
+///             Ok(rt) => rt,
+///             Err(_) => 1,
+///         },
+///     }
+/// }
+///
+/// fn try_connect4(ctx: SockAddrContext) -> Result<i32, i32> {
+///     Ok(0)
+/// }
+/// ```
 #[proc_macro_attribute]
 pub fn cgroup_sock_addr(attrs: TokenStream, item: TokenStream) -> TokenStream {
     let args = parse_macro_input!(attrs as SockAddrArgs);
diff --git a/aya-log-common/Cargo.toml b/aya-log-common/Cargo.toml
index 7413d8b7..5eb0d060 100644
--- a/aya-log-common/Cargo.toml
+++ b/aya-log-common/Cargo.toml
@@ -9,12 +9,7 @@ repository = "https://github.com/aya-rs/aya-log"
 documentation = "https://docs.rs/aya-log"
 edition = "2021"
 
-[features]
-default = []
-userspace = [ "aya" ]
-
 [dependencies]
-aya = { path = "../aya", version = "0.11.0", optional=true }
 num_enum = { version = "0.6", default-features = false }
 
 [lib]
diff --git a/aya-log-common/src/lib.rs b/aya-log-common/src/lib.rs
index 2e162a0f..f833f3c1 100644
--- a/aya-log-common/src/lib.rs
+++ b/aya-log-common/src/lib.rs
@@ -1,6 +1,6 @@
 #![no_std]
 
-use core::{cmp, mem, ptr, slice};
+use core::{mem, num, ptr};
 
 use num_enum::IntoPrimitive;
 
@@ -8,8 +8,10 @@ pub const LOG_BUF_CAPACITY: usize = 8192;
 
 pub const LOG_FIELDS: usize = 6;
 
-#[repr(usize)]
-#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
+pub type LogValueLength = u16;
+
+#[repr(u8)]
+#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, IntoPrimitive)]
 pub enum Level {
     /// The "error" level.
     ///
@@ -33,7 +35,7 @@ pub enum Level {
     Trace,
 }
 
-#[repr(usize)]
+#[repr(u8)]
 #[derive(Copy, Clone, Debug)]
 pub enum RecordField {
     Target = 1,
@@ -46,7 +48,7 @@ pub enum RecordField {
 
 /// Types which are supported by aya-log and can be safely sent from eBPF
 /// programs to userspace.
-#[repr(usize)]
+#[repr(u8)]
 #[derive(Copy, Clone, Debug)]
 pub enum Argument {
     DisplayHint,
@@ -73,6 +75,7 @@ pub enum Argument {
     /// `[u16; 8]` array which represents an IPv6 address.
     ArrU16Len8,
 
+    Bytes,
     Str,
 }
 
@@ -96,64 +99,65 @@ pub enum DisplayHint {
     UpperMac,
 }
 
-#[cfg(feature = "userspace")]
-mod userspace {
-    use super::*;
-
-    unsafe impl aya::Pod for RecordField {}
-    unsafe impl aya::Pod for Argument {}
-    unsafe impl aya::Pod for DisplayHint {}
-}
-
-struct TagLenValue<'a, T> {
-    tag: T,
-    value: &'a [u8],
+struct TagLenValue<T, V> {
+    pub tag: T,
+    pub value: V,
 }
 
-impl<'a, T> TagLenValue<'a, T>
+impl<T, V> TagLenValue<T, V>
 where
-    T: Copy,
+    V: IntoIterator<Item = u8>,
+    <V as IntoIterator>::IntoIter: ExactSizeIterator,
 {
-    #[inline(always)]
-    pub(crate) fn new(tag: T, value: &'a [u8]) -> TagLenValue<'a, T> {
-        TagLenValue { tag, value }
-    }
-
-    pub(crate) fn write(&self, mut buf: &mut [u8]) -> Result<usize, ()> {
-        let size = mem::size_of::<T>() + mem::size_of::<usize>() + self.value.len();
-        let remaining = cmp::min(buf.len(), LOG_BUF_CAPACITY);
-        // Check if the size doesn't exceed the buffer bounds.
-        if size > remaining {
+    pub(crate) fn write(self, mut buf: &mut [u8]) -> Result<usize, ()> {
+        // Break the abstraction to please the verifier.
+        if buf.len() > LOG_BUF_CAPACITY {
+            buf = &mut buf[..LOG_BUF_CAPACITY];
+        }
+        let Self { tag, value } = self;
+        let value = value.into_iter();
+        let len = value.len();
+        let wire_len: LogValueLength = value
+            .len()
+            .try_into()
+            .map_err(|num::TryFromIntError { ..
 }| ())?;
+        let size = mem::size_of_val(&tag) + mem::size_of_val(&wire_len) + len;
+        if size > buf.len() {
             return Err(());
         }
 
-        unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, self.tag) };
-        buf = &mut buf[mem::size_of::<T>()..];
+        let tag_size = mem::size_of_val(&tag);
+        unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, tag) };
+        buf = &mut buf[tag_size..];
 
-        unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, self.value.len()) };
-        buf = &mut buf[mem::size_of::<usize>()..];
+        unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, wire_len) };
+        buf = &mut buf[mem::size_of_val(&wire_len)..];
+
+        buf.iter_mut().zip(value).for_each(|(dst, src)| {
+            *dst = src;
+        });
 
-        let len = cmp::min(buf.len(), self.value.len());
-        // The verifier isn't happy with `len` being unbounded, so compare it
-        // with `LOG_BUF_CAPACITY`.
-        if len > LOG_BUF_CAPACITY {
-            return Err(());
-        }
-        buf[..len].copy_from_slice(&self.value[..len]);
         Ok(size)
     }
 }
 
+impl<T, V> TagLenValue<T, V> {
+    #[inline(always)]
+    pub(crate) fn new(tag: T, value: V) -> TagLenValue<T, V> {
+        TagLenValue { tag, value }
+    }
+}
+
 pub trait WriteToBuf {
     #[allow(clippy::result_unit_err)]
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()>;
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()>;
 }
 
 macro_rules! impl_write_to_buf {
     ($type:ident, $arg_type:expr) => {
         impl WriteToBuf for $type {
-            fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-                TagLenValue::<Argument>::new($arg_type, &self.to_ne_bytes()).write(buf)
+            fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+                TagLenValue::new($arg_type, self.to_ne_bytes()).write(buf)
             }
         }
     };
@@ -175,35 +179,40 @@ impl_write_to_buf!(f32, Argument::F32);
 impl_write_to_buf!(f64, Argument::F64);
 
 impl WriteToBuf for [u8; 16] {
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-        TagLenValue::<Argument>::new(Argument::ArrU8Len16, self).write(buf)
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        TagLenValue::new(Argument::ArrU8Len16, self).write(buf)
     }
 }
 
 impl WriteToBuf for [u16; 8] {
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-        let ptr = self.as_ptr().cast::<u8>();
-        let bytes = unsafe { slice::from_raw_parts(ptr, 16) };
-        TagLenValue::<Argument>::new(Argument::ArrU16Len8, bytes).write(buf)
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        let bytes = unsafe { core::mem::transmute::<_, [u8; 16]>(self) };
+        TagLenValue::new(Argument::ArrU16Len8, bytes).write(buf)
     }
 }
 
 impl WriteToBuf for [u8; 6] {
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-        TagLenValue::<Argument>::new(Argument::ArrU8Len6, self).write(buf)
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        TagLenValue::new(Argument::ArrU8Len6, self).write(buf)
+    }
+}
+
+impl WriteToBuf for &[u8] {
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        TagLenValue::new(Argument::Bytes, self.iter().copied()).write(buf)
     }
 }
 
-impl WriteToBuf for str {
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-        TagLenValue::<Argument>::new(Argument::Str, self.as_bytes()).write(buf)
+impl WriteToBuf for &str {
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        TagLenValue::new(Argument::Str, self.as_bytes().iter().copied()).write(buf)
     }
 }
 
 impl WriteToBuf for DisplayHint {
-    fn write(&self, buf: &mut [u8]) -> Result<usize, ()> {
-        let v: u8 = (*self).into();
-        TagLenValue::<Argument>::new(Argument::DisplayHint, &v.to_ne_bytes()).write(buf)
+    fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
+        let v: u8 = self.into();
+        TagLenValue::new(Argument::DisplayHint, v.to_ne_bytes()).write(buf)
     }
 }
 
@@ -219,17 +228,31 @@ pub fn write_record_header(
     line: u32,
     num_args: usize,
 ) -> Result<usize, ()> {
+    let level: u8 = level.into();
     let mut size = 0;
-    for attr in [
-        TagLenValue::<RecordField>::new(RecordField::Target, target.as_bytes()),
-        
TagLenValue::::new(RecordField::Level, &(level as usize).to_ne_bytes()), - TagLenValue::::new(RecordField::Module, module.as_bytes()), - TagLenValue::::new(RecordField::File, file.as_bytes()), - TagLenValue::::new(RecordField::Line, &line.to_ne_bytes()), - TagLenValue::::new(RecordField::NumArgs, &num_args.to_ne_bytes()), - ] { - size += attr.write(&mut buf[size..])?; - } - + size += TagLenValue::new(RecordField::Target, target.as_bytes().iter().copied()) + .write(&mut buf[size..])?; + size += TagLenValue::new(RecordField::Level, level.to_ne_bytes()).write(&mut buf[size..])?; + size += TagLenValue::new(RecordField::Module, module.as_bytes().iter().copied()) + .write(&mut buf[size..])?; + size += TagLenValue::new(RecordField::File, file.as_bytes().iter().copied()) + .write(&mut buf[size..])?; + size += TagLenValue::new(RecordField::Line, line.to_ne_bytes()).write(&mut buf[size..])?; + size += + TagLenValue::new(RecordField::NumArgs, num_args.to_ne_bytes()).write(&mut buf[size..])?; Ok(size) } + +#[cfg(test)] +mod test { + use super::*; + + fn log_value_length_sufficient() { + assert!( + LOG_BUF_CAPACITY >= LogValueLength::MAX.into(), + "{} < {}", + LOG_BUF_CAPACITY, + LogValueLength::MAX + ); + } +} diff --git a/aya-log-ebpf-macros/Cargo.toml b/aya-log-ebpf-macros/Cargo.toml index 5396ed16..88f5d550 100644 --- a/aya-log-ebpf-macros/Cargo.toml +++ b/aya-log-ebpf-macros/Cargo.toml @@ -8,7 +8,7 @@ aya-log-common = { path = "../aya-log-common" } aya-log-parser = { path = "../aya-log-parser" } proc-macro2 = "1.0" quote = "1.0" -syn = "1.0" +syn = "2.0" [lib] proc-macro = true diff --git a/aya-log-ebpf-macros/src/expand.rs b/aya-log-ebpf-macros/src/expand.rs index 1989fe97..eefcbb72 100644 --- a/aya-log-ebpf-macros/src/expand.rs +++ b/aya-log-ebpf-macros/src/expand.rs @@ -151,12 +151,11 @@ pub(crate) fn log(args: LogArgs, level: Option) -> Result(record_len) #( .and_then(|record_len| { if record_len >= buf.buf.len() { return Err(()); } - { #values_iter }.write(&mut buf.buf[record_len..]).map(|len| record_len + len) + aya_log_ebpf::WriteToBuf::write({ #values_iter }, &mut buf.buf[record_len..]).map(|len| record_len + len) }) )* } { unsafe { ::aya_log_ebpf::AYA_LOGS.output( diff --git a/aya-log/Cargo.toml b/aya-log/Cargo.toml index adcdc7a5..5fb83a32 100644 --- a/aya-log/Cargo.toml +++ b/aya-log/Cargo.toml @@ -12,7 +12,7 @@ edition = "2021" [dependencies] aya = { path = "../aya", version = "0.11.0", features=["async_tokio"] } -aya-log-common = { path = "../aya-log-common", version = "0.1.13", features=["userspace"] } +aya-log-common = { path = "../aya-log-common", version = "0.1.13" } thiserror = "1" log = "0.4" bytes = "1.1" diff --git a/aya-log/src/lib.rs b/aya-log/src/lib.rs index 4ba1d784..92d686bb 100644 --- a/aya-log/src/lib.rs +++ b/aya-log/src/lib.rs @@ -59,9 +59,11 @@ use std::{ const MAP_NAME: &str = "AYA_LOGS"; -use aya_log_common::{Argument, DisplayHint, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS}; +use aya_log_common::{ + Argument, DisplayHint, Level, LogValueLength, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS, +}; use bytes::BytesMut; -use log::{error, Level, Log, Record}; +use log::{error, Log, Record}; use thiserror::Error; use aya::{ @@ -73,6 +75,20 @@ use aya::{ Bpf, Pod, }; +#[derive(Copy, Clone)] +#[repr(transparent)] +struct RecordFieldWrapper(RecordField); +#[derive(Copy, Clone)] +#[repr(transparent)] +struct ArgumentWrapper(Argument); +#[derive(Copy, Clone)] +#[repr(transparent)] +struct DisplayHintWrapper(DisplayHint); + +unsafe impl aya::Pod for RecordFieldWrapper {} +unsafe 
impl aya::Pod for ArgumentWrapper {} +unsafe impl aya::Pod for DisplayHintWrapper {} + /// Log messages generated by `aya_log_ebpf` using the [log] crate. /// /// For more details see the [module level documentation](crate). @@ -102,9 +118,7 @@ impl BpfLogger { let log = logger.clone(); tokio::spawn(async move { - let mut buffers = (0..10) - .map(|_| BytesMut::with_capacity(LOG_BUF_CAPACITY)) - .collect::>(); + let mut buffers = vec![BytesMut::with_capacity(LOG_BUF_CAPACITY); 10]; loop { let events = buf.read_events(&mut buffers).await.unwrap(); @@ -146,6 +160,20 @@ where } } +pub struct LowerHexDebugFormatter; +impl Formatter<&[T]> for LowerHexDebugFormatter +where + T: LowerHex, +{ + fn format(v: &[T]) -> String { + let mut s = String::new(); + for v in v { + let () = core::fmt::write(&mut s, format_args!("{v:x}")).unwrap(); + } + s + } +} + pub struct UpperHexFormatter; impl Formatter for UpperHexFormatter where @@ -156,6 +184,20 @@ where } } +pub struct UpperHexDebugFormatter; +impl Formatter<&[T]> for UpperHexDebugFormatter +where + T: UpperHex, +{ + fn format(v: &[T]) -> String { + let mut s = String::new(); + for v in v { + let () = core::fmt::write(&mut s, format_args!("{v:X}")).unwrap(); + } + s + } +} + pub struct Ipv4Formatter; impl Formatter for Ipv4Formatter where @@ -197,12 +239,22 @@ impl Formatter<[u8; 6]> for UpperMacFormatter { } trait Format { - fn format(&self, last_hint: Option) -> Result; + fn format(&self, last_hint: Option) -> Result; +} + +impl Format for &[u8] { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { + Some(DisplayHint::LowerHex) => Ok(LowerHexDebugFormatter::format(self)), + Some(DisplayHint::UpperHex) => Ok(UpperHexDebugFormatter::format(self)), + _ => Err(()), + } + } } impl Format for u32 { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)), Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)), @@ -216,8 +268,8 @@ impl Format for u32 { } impl Format for [u8; 6] { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Err(()), Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()), @@ -231,8 +283,8 @@ impl Format for [u8; 6] { } impl Format for [u8; 16] { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Err(()), Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()), @@ -246,8 +298,8 @@ impl Format for [u8; 16] { } impl Format for [u16; 8] { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Err(()), Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()), @@ -263,8 +315,8 @@ impl Format for [u16; 8] { macro_rules! 
impl_format { ($type:ident) => { impl Format for $type { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)), Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)), @@ -293,8 +345,8 @@ impl_format!(usize); macro_rules! impl_format_float { ($type:ident) => { impl Format for $type { - fn format(&self, last_hint: Option) -> Result { - match last_hint { + fn format(&self, last_hint: Option) -> Result { + match last_hint.map(|DisplayHintWrapper(dh)| dh) { Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()), @@ -346,33 +398,42 @@ pub enum Error { fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> { let mut target = None; - let mut level = Level::Trace; + let mut level = None; let mut module = None; let mut file = None; let mut line = None; let mut num_args = None; for _ in 0..LOG_FIELDS { - let (attr, rest) = unsafe { TagLenValue::<'_, RecordField>::try_read(buf)? }; + let (RecordFieldWrapper(tag), value, rest) = try_read(buf)?; - match attr.tag { + match tag { RecordField::Target => { - target = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); + target = Some(str::from_utf8(value).map_err(|_| ())?); } RecordField::Level => { - level = unsafe { ptr::read_unaligned(attr.value.as_ptr() as *const _) } + level = Some({ + let level = unsafe { ptr::read_unaligned(value.as_ptr() as *const _) }; + match level { + Level::Error => log::Level::Error, + Level::Warn => log::Level::Warn, + Level::Info => log::Level::Info, + Level::Debug => log::Level::Debug, + Level::Trace => log::Level::Trace, + } + }) } RecordField::Module => { - module = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); + module = Some(str::from_utf8(value).map_err(|_| ())?); } RecordField::File => { - file = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); + file = Some(str::from_utf8(value).map_err(|_| ())?); } RecordField::Line => { - line = Some(u32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?)); + line = Some(u32::from_ne_bytes(value.try_into().map_err(|_| ())?)); } RecordField::NumArgs => { - num_args = Some(usize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?)); + num_args = Some(usize::from_ne_bytes(value.try_into().map_err(|_| ())?)); } } @@ -380,103 +441,106 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> { } let mut full_log_msg = String::new(); - let mut last_hint: Option = None; + let mut last_hint: Option = None; for _ in 0..num_args.ok_or(())? { - let (attr, rest) = unsafe { TagLenValue::<'_, Argument>::try_read(buf)? }; + let (ArgumentWrapper(tag), value, rest) = try_read(buf)?; - match attr.tag { + match tag { Argument::DisplayHint => { - last_hint = Some(unsafe { ptr::read_unaligned(attr.value.as_ptr() as *const _) }); + last_hint = Some(unsafe { ptr::read_unaligned(value.as_ptr() as *const _) }); } Argument::I8 => { full_log_msg.push_str( - &i8::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &i8::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::I16 => { full_log_msg.push_str( - &i16::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &i16::from_ne_bytes(value.try_into().map_err(|_| ())?) 
.format(last_hint.take())?, ); } Argument::I32 => { full_log_msg.push_str( - &i32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &i32::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::I64 => { full_log_msg.push_str( - &i64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &i64::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::Isize => { full_log_msg.push_str( - &isize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &isize::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::U8 => { full_log_msg.push_str( - &u8::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &u8::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::U16 => { full_log_msg.push_str( - &u16::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &u16::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::U32 => { full_log_msg.push_str( - &u32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &u32::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::U64 => { full_log_msg.push_str( - &u64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &u64::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::Usize => { full_log_msg.push_str( - &usize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &usize::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::F32 => { full_log_msg.push_str( - &f32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &f32::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::F64 => { full_log_msg.push_str( - &f64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) + &f64::from_ne_bytes(value.try_into().map_err(|_| ())?) .format(last_hint.take())?, ); } Argument::ArrU8Len6 => { - let value: [u8; 6] = attr.value.try_into().map_err(|_| ())?; + let value: [u8; 6] = value.try_into().map_err(|_| ())?; full_log_msg.push_str(&value.format(last_hint.take())?); } Argument::ArrU8Len16 => { - let value: [u8; 16] = attr.value.try_into().map_err(|_| ())?; + let value: [u8; 16] = value.try_into().map_err(|_| ())?; full_log_msg.push_str(&value.format(last_hint.take())?); } Argument::ArrU16Len8 => { - let data: [u8; 16] = attr.value.try_into().map_err(|_| ())?; + let data: [u8; 16] = value.try_into().map_err(|_| ())?; let mut value: [u16; 8] = Default::default(); for (i, s) in data.chunks_exact(2).enumerate() { value[i] = ((s[1] as u16) << 8) | s[0] as u16; } full_log_msg.push_str(&value.format(last_hint.take())?); } - Argument::Str => match str::from_utf8(attr.value) { + Argument::Bytes => { + full_log_msg.push_str(&value.format(last_hint.take())?); + } + Argument::Str => match str::from_utf8(value) { Ok(v) => { full_log_msg.push_str(v); } @@ -491,7 +555,7 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> { &Record::builder() .args(format_args!("{full_log_msg}")) .target(target.ok_or(())?) - .level(level) + .level(level.ok_or(())?) 
.module_path(module) .file(file) .line(line) @@ -501,42 +565,32 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> { Ok(()) } -struct TagLenValue<'a, T: Pod> { - tag: T, - value: &'a [u8], -} - -impl<'a, T: Pod> TagLenValue<'a, T> { - unsafe fn try_read(mut buf: &'a [u8]) -> Result<(TagLenValue<'a, T>, &'a [u8]), ()> { - if buf.len() < mem::size_of::() + mem::size_of::() { - return Err(()); - } - - let tag = ptr::read_unaligned(buf.as_ptr() as *const T); - buf = &buf[mem::size_of::()..]; +fn try_read(mut buf: &[u8]) -> Result<(T, &[u8], &[u8]), ()> { + if buf.len() < mem::size_of::() + mem::size_of::() { + return Err(()); + } - let len = usize::from_ne_bytes(buf[..mem::size_of::()].try_into().unwrap()); - buf = &buf[mem::size_of::()..]; + let tag = unsafe { ptr::read_unaligned(buf.as_ptr() as *const T) }; + buf = &buf[mem::size_of::()..]; - if buf.len() < len { - return Err(()); - } + let len = + LogValueLength::from_ne_bytes(buf[..mem::size_of::()].try_into().unwrap()); + buf = &buf[mem::size_of::()..]; - Ok(( - TagLenValue { - tag, - value: &buf[..len], - }, - &buf[len..], - )) + let len: usize = len.into(); + if buf.len() < len { + return Err(()); } + + let (value, rest) = buf.split_at(len); + Ok((tag, value, rest)) } #[cfg(test)] mod test { use super::*; use aya_log_common::{write_record_header, WriteToBuf}; - use log::logger; + use log::{logger, Level}; fn new_log(args: usize) -> Result<(usize, Vec), ()> { let mut buf = vec![0; 8192]; @@ -555,14 +609,14 @@ mod test { #[test] fn test_str() { testing_logger::setup(); - let (len, mut input) = new_log(1).unwrap(); + let (mut len, mut input) = new_log(1).unwrap(); - "test" - .write(&mut input[len..]) - .expect("could not write to the buffer"); + len += "test".write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "test"); @@ -575,13 +629,13 @@ mod test { testing_logger::setup(); let (mut len, mut input) = new_log(2).unwrap(); - len += "hello " - .write(&mut input[len..]) - .expect("could not write to the buffer"); - "test".write(&mut input[len..]).unwrap(); + len += "hello ".write(&mut input[len..]).unwrap(); + len += "test".write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "hello test"); @@ -589,6 +643,49 @@ mod test { }); } + #[test] + fn test_bytes() { + testing_logger::setup(); + let (mut len, mut input) = new_log(2).unwrap(); + + len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap(); + len += [0xde, 0xad].write(&mut input[len..]).unwrap(); + + _ = len; + + let logger = logger(); + let () = log_buf(&input, logger).unwrap(); + testing_logger::validate(|captured_logs| { + assert_eq!(captured_logs.len(), 1); + assert_eq!(captured_logs[0].body, "dead"); + assert_eq!(captured_logs[0].level, Level::Info); + }); + } + + #[test] + fn test_bytes_with_args() { + testing_logger::setup(); + let (mut len, mut input) = new_log(5).unwrap(); + + len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap(); + len += [0xde, 0xad].write(&mut input[len..]).unwrap(); + + len += " ".write(&mut input[len..]).unwrap(); + + len += DisplayHint::UpperHex.write(&mut input[len..]).unwrap(); + len += [0xbe, 0xef].write(&mut 
input[len..]).unwrap(); + + _ = len; + + let logger = logger(); + let () = log_buf(&input, logger).unwrap(); + testing_logger::validate(|captured_logs| { + assert_eq!(captured_logs.len(), 1); + assert_eq!(captured_logs[0].body, "dead BEEF"); + assert_eq!(captured_logs[0].level, Level::Info); + }); + } + #[test] fn test_display_hint_default() { testing_logger::setup(); @@ -596,10 +693,12 @@ mod test { len += "default hint: ".write(&mut input[len..]).unwrap(); len += DisplayHint::Default.write(&mut input[len..]).unwrap(); - 14.write(&mut input[len..]).unwrap(); + len += 14.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "default hint: 14"); @@ -614,10 +713,12 @@ mod test { len += "lower hex: ".write(&mut input[len..]).unwrap(); len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap(); - 200.write(&mut input[len..]).unwrap(); + len += 200.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "lower hex: c8"); @@ -632,10 +733,12 @@ mod test { len += "upper hex: ".write(&mut input[len..]).unwrap(); len += DisplayHint::UpperHex.write(&mut input[len..]).unwrap(); - 200.write(&mut input[len..]).unwrap(); + len += 200.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "upper hex: C8"); @@ -651,10 +754,12 @@ mod test { len += "ipv4: ".write(&mut input[len..]).unwrap(); len += DisplayHint::Ipv4.write(&mut input[len..]).unwrap(); // 10.0.0.1 as u32 - 167772161u32.write(&mut input[len..]).unwrap(); + len += 167772161u32.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1"); @@ -674,10 +779,12 @@ mod test { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, ]; - ipv6_arr.write(&mut input[len..]).unwrap(); + len += ipv6_arr.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1"); @@ -696,10 +803,12 @@ mod test { let ipv6_arr: [u16; 8] = [ 0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, ]; - ipv6_arr.write(&mut input[len..]).unwrap(); + len += ipv6_arr.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1"); @@ -716,10 +825,12 @@ mod test { len += DisplayHint::LowerMac.write(&mut input[len..]).unwrap(); // 00:00:5e:00:53:af as byte array let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf]; - mac_arr.write(&mut 
input[len..]).unwrap(); + len += mac_arr.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "mac: 00:00:5e:00:53:af"); @@ -736,10 +847,12 @@ mod test { len += DisplayHint::UpperMac.write(&mut input[len..]).unwrap(); // 00:00:5E:00:53:AF as byte array let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf]; - mac_arr.write(&mut input[len..]).unwrap(); + len += mac_arr.write(&mut input[len..]).unwrap(); + + _ = len; let logger = logger(); - let _ = log_buf(&input, logger); + let () = log_buf(&input, logger).unwrap(); testing_logger::validate(|captured_logs| { assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs[0].body, "mac: 00:00:5E:00:53:AF"); diff --git a/aya-obj/Cargo.toml b/aya-obj/Cargo.toml index 9ede2431..d20b1930 100644 --- a/aya-obj/Cargo.toml +++ b/aya-obj/Cargo.toml @@ -13,15 +13,14 @@ edition = "2021" [dependencies] bytes = "1" log = "0.4" -object = { version = "0.30", default-features = false, features = ["read_core", "elf"] } -hashbrown = { version = "0.13", optional = true } -thiserror-std = { package = "thiserror", version = "1" } -thiserror-core = { version = "1", default-features = false, features = [], optional = true } +object = { version = "0.31", default-features = false, features = ["read_core", "elf"] } +hashbrown = { version = "0.13" } +thiserror = { version = "1", default-features = false } +core-error = { version = "0.0.0" } [dev-dependencies] matches = "0.1.8" rbpf = "0.1.0" [features] -default = [] -no_std = ["hashbrown", "thiserror-core"] +std = [] diff --git a/aya-obj/src/btf/btf.rs b/aya-obj/src/btf/btf.rs index ac915bfd..325a8aef 100644 --- a/aya-obj/src/btf/btf.rs +++ b/aya-obj/src/btf/btf.rs @@ -21,18 +21,20 @@ use crate::{ IntEncoding, LineInfo, Struct, Typedef, VarLinkage, }, generated::{btf_ext_header, btf_header}, - thiserror::{self, Error}, util::{bytes_of, HashMap}, Object, }; +#[cfg(not(feature = "std"))] +use crate::std; + pub(crate) const MAX_RESOLVE_DEPTH: u8 = 32; pub(crate) const MAX_SPEC_LEN: usize = 64; /// The error type returned when `BTF` operations fail. -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] pub enum BtfError { - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] /// Error parsing file #[error("error parsing {path}")] FileError { @@ -126,7 +128,7 @@ pub enum BtfError { type_id: u32, }, - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] /// Loading the btf failed #[error("the BPF_BTF_LOAD syscall failed. Verifier output: {verifier_log}")] LoadError { @@ -232,13 +234,13 @@ impl Btf { } /// Loads BTF metadata from `/sys/kernel/btf/vmlinux`. - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] pub fn from_sys_fs() -> Result { Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()) } /// Loads BTF metadata from the given `path`. - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] pub fn parse_file>( path: P, endianness: Endianness, @@ -432,6 +434,19 @@ impl Btf { // Sanitize DATASEC if they are not supported BtfType::DataSec(d) if !features.btf_datasec => { debug!("{}: not supported. replacing with STRUCT", kind); + + // STRUCT aren't allowed to have "." in their name, fixup this if needed. + let mut name_offset = t.name_offset(); + let sec_name = self.string_at(name_offset)?; + let name = sec_name.to_string(); + + // Handle any "." 
characters in struct names + // Example: ".maps" + let fixed_name = name.replace('.', "_"); + if fixed_name != name { + name_offset = self.add_string(fixed_name); + } + let mut members = vec![]; for member in d.entries.iter() { let mt = types.type_by_id(member.btf_type).unwrap(); @@ -441,7 +456,9 @@ impl Btf { offset: member.offset * 8, }) } - types.types[i] = BtfType::Struct(Struct::new(t.name_offset(), members, 0)); + + types.types[i] = + BtfType::Struct(Struct::new(name_offset, members, d.entries.len() as u32)); } // Fixup DATASEC // DATASEC sizes aren't always set by LLVM @@ -514,7 +531,7 @@ impl Btf { // Fixup FUNC_PROTO BtfType::FuncProto(ty) if features.btf_func => { let mut ty = ty.clone(); - for (i, mut param) in ty.params.iter_mut().enumerate() { + for (i, param) in ty.params.iter_mut().enumerate() { if param.name_offset == 0 && param.btf_type != 0 { param.name_offset = self.add_string(format!("param{i}")); } @@ -536,22 +553,39 @@ impl Btf { types.types[i] = enum_type; } // Sanitize FUNC - BtfType::Func(ty) if !features.btf_func => { - debug!("{}: not supported. replacing with TYPEDEF", kind); - let typedef_type = BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type)); - types.types[i] = typedef_type; - } - // Sanitize BTF_FUNC_GLOBAL - BtfType::Func(ty) if !features.btf_func_global => { - let mut fixed_ty = ty.clone(); - if ty.linkage() == FuncLinkage::Global { - debug!( - "{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC", - kind - ); - fixed_ty.set_linkage(FuncLinkage::Static); + BtfType::Func(ty) => { + let name = self.string_at(ty.name_offset)?; + // Sanitize FUNC + if !features.btf_func { + debug!("{}: not supported. replacing with TYPEDEF", kind); + let typedef_type = + BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type)); + types.types[i] = typedef_type; + } else if !features.btf_func_global + || name == "memset" + || name == "memcpy" + || name == "memmove" + || name == "memcmp" + { + // Sanitize BTF_FUNC_GLOBAL when not supported and ensure that + // memory builtins are marked as static. Globals are type checked + // and verified separately from their callers, while instead we + // want tracking info (eg bound checks) to be propagated to the + // memory builtins. + let mut fixed_ty = ty.clone(); + if ty.linkage() == FuncLinkage::Global { + if !features.btf_func_global { + debug!( + "{}: BTF_FUNC_GLOBAL not supported. 
replacing with BTF_FUNC_STATIC", + kind + ); + } else { + debug!("changing FUNC {name} linkage to BTF_FUNC_STATIC"); + } + fixed_ty.set_linkage(FuncLinkage::Static); + } + types.types[i] = BtfType::Func(fixed_ty); } - types.types[i] = BtfType::Func(fixed_ty); } // Sanitize FLOAT BtfType::Float(ty) if !features.btf_float => { @@ -1116,7 +1150,7 @@ mod tests { VarLinkage::Static, ))); - let name_offset = btf.add_string(".data".to_string()); + let name_offset = btf.add_string("data".to_string()); let variables = vec![DataSecEntry { btf_type: var_type_id, offset: 0, @@ -1350,6 +1384,60 @@ mod tests { Btf::parse(&raw, Endianness::default()).unwrap(); } + #[test] + fn test_sanitize_mem_builtins() { + let mut btf = Btf::new(); + let name_offset = btf.add_string("int".to_string()); + let int_type_id = btf.add_type(BtfType::Int(Int::new( + name_offset, + 4, + IntEncoding::Signed, + 0, + ))); + + let params = vec![ + BtfParam { + name_offset: btf.add_string("a".to_string()), + btf_type: int_type_id, + }, + BtfParam { + name_offset: btf.add_string("b".to_string()), + btf_type: int_type_id, + }, + ]; + let func_proto_type_id = + btf.add_type(BtfType::FuncProto(FuncProto::new(params, int_type_id))); + + let builtins = ["memset", "memcpy", "memcmp", "memmove"]; + for fname in builtins { + let func_name_offset = btf.add_string(fname.to_string()); + let func_type_id = btf.add_type(BtfType::Func(Func::new( + func_name_offset, + func_proto_type_id, + FuncLinkage::Global, + ))); + + let features = BtfFeatures { + btf_func: true, + btf_func_global: true, // to force function name check + ..Default::default() + }; + + btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features) + .unwrap(); + + if let BtfType::Func(fixed) = btf.type_by_id(func_type_id).unwrap() { + assert!(fixed.linkage() == FuncLinkage::Static); + } else { + panic!("not a func") + } + + // Ensure we can convert to bytes and back again + let raw = btf.to_bytes(); + Btf::parse(&raw, Endianness::default()).unwrap(); + } + } + #[test] fn test_sanitize_float() { let mut btf = Btf::new(); @@ -1442,7 +1530,7 @@ mod tests { } #[test] - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] #[cfg_attr(miri, ignore)] fn test_read_btf_from_sys_fs() { let btf = Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()).unwrap(); diff --git a/aya-obj/src/btf/relocation.rs b/aya-obj/src/btf/relocation.rs index 7b8cf002..3098ab7f 100644 --- a/aya-obj/src/btf/relocation.rs +++ b/aya-obj/src/btf/relocation.rs @@ -17,13 +17,15 @@ use crate::{ bpf_core_relo, bpf_core_relo_kind::*, bpf_insn, BPF_ALU, BPF_ALU64, BPF_B, BPF_DW, BPF_H, BPF_K, BPF_LD, BPF_LDX, BPF_ST, BPF_STX, BPF_W, BTF_INT_SIGNED, }, - thiserror::{self, Error}, util::HashMap, Object, Program, ProgramSection, }; +#[cfg(not(feature = "std"))] +use crate::std; + /// The error type returned by [`Object::relocate_btf`]. 
-#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] #[error("error relocating `{section}`")] pub struct BtfRelocationError { /// The function name @@ -34,9 +36,9 @@ pub struct BtfRelocationError { } /// Relocation failures -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] enum RelocationError { - #[cfg(not(feature = "no_std"))] + #[cfg(feature = "std")] /// I/O error #[error(transparent)] IOError(#[from] std::io::Error), @@ -853,7 +855,7 @@ impl ComputedRelocation { let instructions = &mut program.function.instructions; let num_instructions = instructions.len(); let ins_index = rel.ins_offset / mem::size_of::(); - let mut ins = + let ins = instructions .get_mut(ins_index) .ok_or(RelocationError::InvalidInstructionIndex { @@ -932,7 +934,7 @@ impl ComputedRelocation { } BPF_LD => { ins.imm = target_value as i32; - let mut next_ins = instructions.get_mut(ins_index + 1).ok_or( + let next_ins = instructions.get_mut(ins_index + 1).ok_or( RelocationError::InvalidInstructionIndex { index: ins_index + 1, num_instructions, diff --git a/aya-obj/src/lib.rs b/aya-obj/src/lib.rs index 3775295e..a72a8eef 100644 --- a/aya-obj/src/lib.rs +++ b/aya-obj/src/lib.rs @@ -63,16 +63,17 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![deny(clippy::all, missing_docs)] #![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)] -#![cfg_attr(feature = "no_std", feature(error_in_core))] - -#[cfg(feature = "no_std")] -pub(crate) use thiserror_core as thiserror; -#[cfg(not(feature = "no_std"))] -pub(crate) use thiserror_std as thiserror; extern crate alloc; -#[cfg(not(feature = "no_std"))] +#[cfg(feature = "std")] extern crate std; +#[cfg(not(feature = "std"))] +mod std { + pub mod error { + pub use core_error::Error; + } + pub use core::*; +} pub mod btf; pub mod generated; diff --git a/aya-obj/src/maps.rs b/aya-obj/src/maps.rs index 22a88da9..dbffed0c 100644 --- a/aya-obj/src/maps.rs +++ b/aya-obj/src/maps.rs @@ -2,12 +2,12 @@ use core::mem; -use crate::{ - thiserror::{self, Error}, - BpfSectionKind, -}; +use crate::BpfSectionKind; use alloc::vec::Vec; +#[cfg(not(feature = "std"))] +use crate::std; + /// Invalid map type encontered pub struct InvalidMapTypeError { /// The map type @@ -94,7 +94,7 @@ pub enum PinningType { } /// The error type returned when failing to parse a [PinningType] -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] pub enum PinningError { /// Unsupported pinning type #[error("unsupported pinning type `{pinning_type}`")] diff --git a/aya-obj/src/obj.rs b/aya-obj/src/obj.rs index 1a376b58..773d2930 100644 --- a/aya-obj/src/obj.rs +++ b/aya-obj/src/obj.rs @@ -15,12 +15,16 @@ use object::{ }; use crate::{ + btf::BtfFeatures, + generated::{BPF_CALL, BPF_JMP, BPF_K}, maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE}, relocation::*, - thiserror::{self, Error}, util::HashMap, }; +#[cfg(not(feature = "std"))] +use crate::std; + use crate::{ btf::{Btf, BtfError, BtfExt, BtfType}, generated::{bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_F_RDONLY_PROG}, @@ -33,6 +37,17 @@ use crate::btf::{Array, DataSecEntry, FuncSecInfo, LineSecInfo}; const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE; +/// Features implements BPF and BTF feature detection +#[derive(Default, Debug)] +#[allow(missing_docs)] +pub struct Features { + pub bpf_name: bool, + pub bpf_probe_read_kernel: bool, + pub bpf_perf_link: bool, + pub bpf_global_data: bool, + pub btf: Option, +} + /// The loaded object file representation #[derive(Clone)] pub struct Object { @@ -147,7 +162,7 @@ pub struct Function 
{ /// - `uprobe.s+` or `uretprobe.s+` /// - `usdt+` /// - `kprobe.multi+` or `kretprobe.multi+`: `BPF_TRACE_KPROBE_MULTI` -/// - `lsm_cgroup+` or `lsm.s+` +/// - `lsm_cgroup+` /// - `lwt_in`, `lwt_out`, `lwt_seg6local`, `lwt_xmit` /// - `raw_tp.w+`, `raw_tracepoint.w+` /// - `action` @@ -182,7 +197,7 @@ pub enum ProgramSection { }, Xdp { name: String, - frags_supported: bool, + frags: bool, }, SkMsg { name: String, @@ -230,6 +245,7 @@ pub enum ProgramSection { }, Lsm { name: String, + sleepable: bool, }, BtfTracePoint { name: String, @@ -280,7 +296,7 @@ impl ProgramSection { ProgramSection::LircMode2 { name } => name, ProgramSection::PerfEvent { name } => name, ProgramSection::RawTracePoint { name } => name, - ProgramSection::Lsm { name } => name, + ProgramSection::Lsm { name, .. } => name, ProgramSection::BtfTracePoint { name } => name, ProgramSection::FEntry { name } => name, ProgramSection::FExit { name } => name, @@ -312,14 +328,8 @@ impl FromStr for ProgramSection { "kretprobe" => KRetProbe { name }, "uprobe" => UProbe { name }, "uretprobe" => URetProbe { name }, - "xdp" => Xdp { - name, - frags_supported: false, - }, - "xdp.frags" => Xdp { - name, - frags_supported: true, - }, + "xdp" => Xdp { name, frags: false }, + "xdp.frags" => Xdp { name, frags: true }, "tp_btf" => BtfTracePoint { name }, _ if kind.starts_with("tracepoint") || kind.starts_with("tp") => { // tracepoint sections are named `tracepoint/category/event_name`, @@ -471,7 +481,14 @@ impl FromStr for ProgramSection { "lirc_mode2" => LircMode2 { name }, "perf_event" => PerfEvent { name }, "raw_tp" | "raw_tracepoint" => RawTracePoint { name }, - "lsm" => Lsm { name }, + "lsm" => Lsm { + name, + sleepable: false, + }, + "lsm.s" => Lsm { + name, + sleepable: true, + }, "fentry" => FEntry { name }, "fexit" => FExit { name }, "freplace" => Extension { name }, @@ -878,6 +895,52 @@ impl Object { Ok(()) } + + /// Sanitize BPF programs. 
+ pub fn sanitize_programs(&mut self, features: &Features) { + for program in self.programs.values_mut() { + program.sanitize(features); + } + } +} + +fn insn_is_helper_call(ins: &bpf_insn) -> bool { + let klass = (ins.code & 0x07) as u32; + let op = (ins.code & 0xF0) as u32; + let src = (ins.code & 0x08) as u32; + + klass == BPF_JMP && op == BPF_CALL && src == BPF_K && ins.src_reg() == 0 && ins.dst_reg() == 0 +} + +const BPF_FUNC_PROBE_READ: i32 = 4; +const BPF_FUNC_PROBE_READ_STR: i32 = 45; +const BPF_FUNC_PROBE_READ_USER: i32 = 112; +const BPF_FUNC_PROBE_READ_KERNEL: i32 = 113; +const BPF_FUNC_PROBE_READ_USER_STR: i32 = 114; +const BPF_FUNC_PROBE_READ_KERNEL_STR: i32 = 115; + +impl Program { + fn sanitize(&mut self, features: &Features) { + for inst in &mut self.function.instructions { + if !insn_is_helper_call(inst) { + continue; + } + + match inst.imm { + BPF_FUNC_PROBE_READ_USER | BPF_FUNC_PROBE_READ_KERNEL + if !features.bpf_probe_read_kernel => + { + inst.imm = BPF_FUNC_PROBE_READ; + } + BPF_FUNC_PROBE_READ_USER_STR | BPF_FUNC_PROBE_READ_KERNEL_STR + if !features.bpf_probe_read_kernel => + { + inst.imm = BPF_FUNC_PROBE_READ_STR; + } + _ => {} + } + } + } } // Parses multiple map definition contained in a single `maps` section (which is @@ -920,7 +983,7 @@ fn parse_maps_section<'a, I: Iterator>( } /// Errors caught during parsing the object file -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] #[allow(missing_docs)] pub enum ParseError { #[error("error parsing ELF data")] @@ -1031,11 +1094,12 @@ impl BpfSectionKind { BpfSectionKind::BtfMaps } else if name.starts_with(".text") { BpfSectionKind::Text - } else if name.starts_with(".bss") - || name.starts_with(".data") - || name.starts_with(".rodata") - { + } else if name.starts_with(".bss") { + BpfSectionKind::Bss + } else if name.starts_with(".data") { BpfSectionKind::Data + } else if name.starts_with(".rodata") { + BpfSectionKind::Rodata } else if name == ".BTF" { BpfSectionKind::Btf } else if name == ".BTF.ext" { @@ -1816,7 +1880,7 @@ mod tests { assert_matches!( obj.programs.get("foo"), Some(Program { - section: ProgramSection::Xdp { .. }, + section: ProgramSection::Xdp { frags: false, .. }, .. }) ); @@ -1837,10 +1901,7 @@ mod tests { assert_matches!( obj.programs.get("foo"), Some(Program { - section: ProgramSection::Xdp { - frags_supported: true, - .. - }, + section: ProgramSection::Xdp { frags: true, .. }, .. }) ); @@ -1898,7 +1959,34 @@ mod tests { assert_matches!( obj.programs.get("foo"), Some(Program { - section: ProgramSection::Lsm { .. }, + section: ProgramSection::Lsm { + sleepable: false, + .. + }, + .. + }) + ); + } + + #[test] + fn test_parse_section_lsm_sleepable() { + let mut obj = fake_obj(); + + assert_matches!( + obj.parse_section(fake_section( + BpfSectionKind::Program, + "lsm.s/foo", + bytes_of(&fake_ins()) + )), + Ok(()) + ); + assert_matches!( + obj.programs.get("foo"), + Some(Program { + section: ProgramSection::Lsm { + sleepable: true, + .. + }, .. }) ); diff --git a/aya-obj/src/programs/cgroup_sock.rs b/aya-obj/src/programs/cgroup_sock.rs index b9194e24..227e26d5 100644 --- a/aya-obj/src/programs/cgroup_sock.rs +++ b/aya-obj/src/programs/cgroup_sock.rs @@ -1,10 +1,10 @@ //! Cgroup socket programs. use alloc::{borrow::ToOwned, string::String}; -use crate::{ - generated::bpf_attach_type, - thiserror::{self, Error}, -}; +use crate::generated::bpf_attach_type; + +#[cfg(not(feature = "std"))] +use crate::std; /// Defines where to attach a `CgroupSock` program. 
#[derive(Copy, Clone, Debug, Default)] @@ -31,7 +31,7 @@ impl From for bpf_attach_type { } } -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] #[error("{0} is not a valid attach type for a CGROUP_SOCK program")] pub(crate) struct InvalidAttachType(String); diff --git a/aya-obj/src/programs/cgroup_sock_addr.rs b/aya-obj/src/programs/cgroup_sock_addr.rs index 39db0ae3..6bd4070e 100644 --- a/aya-obj/src/programs/cgroup_sock_addr.rs +++ b/aya-obj/src/programs/cgroup_sock_addr.rs @@ -1,10 +1,10 @@ //! Cgroup socket address programs. use alloc::{borrow::ToOwned, string::String}; -use crate::{ - generated::bpf_attach_type, - thiserror::{self, Error}, -}; +use crate::generated::bpf_attach_type; + +#[cfg(not(feature = "std"))] +use crate::std; /// Defines where to attach a `CgroupSockAddr` program. #[derive(Copy, Clone, Debug)] @@ -54,7 +54,7 @@ impl From for bpf_attach_type { } } -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] #[error("{0} is not a valid attach type for a CGROUP_SOCK_ADDR program")] pub(crate) struct InvalidAttachType(String); diff --git a/aya-obj/src/programs/cgroup_sockopt.rs b/aya-obj/src/programs/cgroup_sockopt.rs index b48e984a..e3495728 100644 --- a/aya-obj/src/programs/cgroup_sockopt.rs +++ b/aya-obj/src/programs/cgroup_sockopt.rs @@ -1,10 +1,10 @@ //! Cgroup socket option programs. use alloc::{borrow::ToOwned, string::String}; -use crate::{ - generated::bpf_attach_type, - thiserror::{self, Error}, -}; +use crate::generated::bpf_attach_type; + +#[cfg(not(feature = "std"))] +use crate::std; /// Defines where to attach a `CgroupSockopt` program. #[derive(Copy, Clone, Debug)] @@ -24,7 +24,7 @@ impl From for bpf_attach_type { } } -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] #[error("{0} is not a valid attach type for a CGROUP_SOCKOPT program")] pub(crate) struct InvalidAttachType(String); diff --git a/aya-obj/src/relocation.rs b/aya-obj/src/relocation.rs index c2f2096d..6422ed38 100644 --- a/aya-obj/src/relocation.rs +++ b/aya-obj/src/relocation.rs @@ -1,7 +1,6 @@ //! Program relocation handling. use core::mem; -use std::collections::HashSet; use alloc::{borrow::ToOwned, string::String}; use log::debug; @@ -14,15 +13,17 @@ use crate::{ }, maps::Map, obj::{Function, Object, Program}, - thiserror::{self, Error}, - util::HashMap, + util::{HashMap, HashSet}, BpfSectionKind, }; +#[cfg(not(feature = "std"))] +use crate::std; + pub(crate) const INS_SIZE: usize = mem::size_of::(); /// The error type returned by [`Object::relocate_maps`] and [`Object::relocate_calls`] -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] #[error("error relocating `{function}`")] pub struct BpfRelocationError { /// The function name @@ -33,7 +34,7 @@ pub struct BpfRelocationError { } /// Relocation failures -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] pub enum RelocationError { /// Unknown symbol #[error("unknown symbol, index `{index}`")] @@ -428,7 +429,7 @@ impl<'a> FunctionLinker<'a> { let callee_ins_index = self.link_function(program, callee)? 
as i32; - let mut ins = &mut program.instructions[ins_index]; + let ins = &mut program.instructions[ins_index]; let ins_index = ins_index as i32; ins.imm = callee_ins_index - ins_index - 1; debug!( @@ -589,8 +590,6 @@ mod test { assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[0].imm, 1); - - mem::forget(map); } #[test] @@ -650,9 +649,6 @@ mod test { assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[1].imm, 2); - - mem::forget(map_1); - mem::forget(map_2); } #[test] @@ -689,8 +685,6 @@ mod test { assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[0].imm, 1); - - mem::forget(map); } #[test] @@ -750,8 +744,5 @@ mod test { assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[1].imm, 2); - - mem::forget(map_1); - mem::forget(map_2); } } diff --git a/aya-obj/src/util.rs b/aya-obj/src/util.rs index 36355a18..53209444 100644 --- a/aya-obj/src/util.rs +++ b/aya-obj/src/util.rs @@ -1,10 +1,15 @@ use core::{mem, slice}; -#[cfg(feature = "no_std")] +#[cfg(not(feature = "std"))] pub(crate) use hashbrown::HashMap; -#[cfg(not(feature = "no_std"))] +#[cfg(feature = "std")] pub(crate) use std::collections::HashMap; +#[cfg(not(feature = "std"))] +pub(crate) use hashbrown::HashSet; +#[cfg(feature = "std")] +pub(crate) use std::collections::HashSet; + /// bytes_of converts a to a byte slice pub(crate) unsafe fn bytes_of(val: &T) -> &[u8] { let size = mem::size_of::(); diff --git a/aya-tool/Cargo.toml b/aya-tool/Cargo.toml index 8f7d9f93..cfc605b2 100644 --- a/aya-tool/Cargo.toml +++ b/aya-tool/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Alessandro Decina "] edition = "2021" [dependencies] -bindgen = "0.64" +bindgen = "0.65" clap = { version = "4", features = ["derive"] } anyhow = "1" thiserror = "1" diff --git a/aya/Cargo.toml b/aya/Cargo.toml index 1b00546e..f12ebf27 100644 --- a/aya/Cargo.toml +++ b/aya/Cargo.toml @@ -12,10 +12,10 @@ edition = "2021" [dependencies] libc = { version = "0.2.105" } -aya-obj = { path = "../aya-obj", version = "0.1.0" } +aya-obj = { path = "../aya-obj", version = "0.1.0", features = ["std"] } thiserror = "1" -object = { version = "0.30", default-features = false, features = ["std", "read_core", "elf"] } -bitflags = "1.2.1" +object = { version = "0.31", default-features = false, features = ["std", "read_core", "elf"] } +bitflags = "2.2.1" bytes = "1" lazy_static = "1" parking_lot = { version = "0.12.0", features = ["send_guard"] } diff --git a/aya/src/bpf.rs b/aya/src/bpf.rs index 34d88277..938629d0 100644 --- a/aya/src/bpf.rs +++ b/aya/src/bpf.rs @@ -9,9 +9,9 @@ use std::{ use aya_obj::{ btf::{BtfFeatures, BtfRelocationError}, - generated::BPF_F_XDP_HAS_FRAGS, + generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS}, relocation::BpfRelocationError, - BpfSectionKind, + BpfSectionKind, Features, }; use log::debug; use thiserror::Error; @@ -33,9 +33,10 @@ use crate::{ SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp, }, sys::{ - bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_btf_datasec_supported, - is_btf_decl_tag_supported, is_btf_float_supported, is_btf_func_global_supported, - is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported, + bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_bpf_global_data_supported, + is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_float_supported, + is_btf_func_global_supported, 
is_btf_func_supported, is_btf_supported, + is_btf_type_tag_supported, is_perf_link_supported, is_probe_read_kernel_supported, is_prog_name_supported, retry_with_verifier_logs, }, util::{bytes_of, bytes_of_slice, possible_cpus, VerifierLog, POSSIBLE_CPUS}, @@ -66,39 +67,31 @@ unsafe impl Pod for [T; N] {} pub use aya_obj::maps::{bpf_map_def, PinningType}; lazy_static! { - pub(crate) static ref FEATURES: Features = Features::new(); + pub(crate) static ref FEATURES: Features = detect_features(); } -// Features implements BPF and BTF feature detection -#[derive(Default, Debug)] -pub(crate) struct Features { - pub bpf_name: bool, - pub bpf_perf_link: bool, - pub btf: Option, -} - -impl Features { - fn new() -> Self { - let btf = if is_btf_supported() { - Some(BtfFeatures { - btf_func: is_btf_func_supported(), - btf_func_global: is_btf_func_global_supported(), - btf_datasec: is_btf_datasec_supported(), - btf_float: is_btf_float_supported(), - btf_decl_tag: is_btf_decl_tag_supported(), - btf_type_tag: is_btf_type_tag_supported(), - }) - } else { - None - }; - let f = Features { - bpf_name: is_prog_name_supported(), - bpf_perf_link: is_perf_link_supported(), - btf, - }; - debug!("BPF Feature Detection: {:#?}", f); - f - } +fn detect_features() -> Features { + let btf = if is_btf_supported() { + Some(BtfFeatures { + btf_func: is_btf_func_supported(), + btf_func_global: is_btf_func_global_supported(), + btf_datasec: is_btf_datasec_supported(), + btf_float: is_btf_float_supported(), + btf_decl_tag: is_btf_decl_tag_supported(), + btf_type_tag: is_btf_type_tag_supported(), + }) + } else { + None + }; + let f = Features { + bpf_name: is_prog_name_supported(), + bpf_probe_read_kernel: is_probe_read_kernel_supported(), + bpf_perf_link: is_perf_link_supported(), + bpf_global_data: is_bpf_global_data_supported(), + btf, + }; + debug!("BPF Feature Detection: {:#?}", f); + f } /// Builder style API for advanced loading of eBPF programs. @@ -134,13 +127,14 @@ pub struct BpfLoader<'a> { bitflags! { /// Used to set the verifier log level flags in [BpfLoader](BpfLoader::verifier_log_level()). + #[derive(Debug)] pub struct VerifierLogLevel: u32 { /// Sets no verifier logging. const DISABLE = 0; /// Enables debug verifier logging. const DEBUG = 1; /// Enables verbose verifier logging. - const VERBOSE = 2 | Self::DEBUG.bits; + const VERBOSE = 2 | Self::DEBUG.bits(); /// Enables verifier stats. const STATS = 4; } @@ -148,9 +142,7 @@ bitflags! 
{ impl Default for VerifierLogLevel { fn default() -> Self { - Self { - bits: Self::DEBUG.bits | Self::STATS.bits, - } + Self::DEBUG | Self::STATS } } @@ -348,7 +340,7 @@ impl<'a> BpfLoader<'a> { /// # Ok::<(), aya::BpfError>(()) /// ``` pub fn load(&mut self, data: &[u8]) -> Result { - let verifier_log_level = self.verifier_log_level.bits; + let verifier_log_level = self.verifier_log_level.bits(); let mut obj = Object::parse(data)?; obj.patch_map_data(self.globals.clone())?; @@ -368,6 +360,12 @@ impl<'a> BpfLoader<'a> { } let mut maps = HashMap::new(); for (name, mut obj) in obj.maps.drain() { + if let (false, BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata) = + (FEATURES.bpf_global_data, obj.section_kind()) + { + continue; + } + match self.max_entries.get(name.as_str()) { Some(size) => obj.set_max_entries(*size), None => { @@ -443,6 +441,7 @@ impl<'a> BpfLoader<'a> { &text_sections, )?; obj.relocate_calls(&text_sections)?; + obj.sanitize_programs(&FEATURES); let programs = obj .programs @@ -485,12 +484,10 @@ impl<'a> BpfLoader<'a> { data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), }) } - ProgramSection::Xdp { - frags_supported, .. - } => { + ProgramSection::Xdp { frags, .. } => { let mut data = ProgramData::new(prog_name, obj, btf_fd, verifier_log_level); - if *frags_supported { + if *frags { data.flags = BPF_F_XDP_HAS_FRAGS; } Program::Xdp(Xdp { data }) @@ -558,9 +555,14 @@ impl<'a> BpfLoader<'a> { data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), }) } - ProgramSection::Lsm { .. } => Program::Lsm(Lsm { - data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), - }), + ProgramSection::Lsm { sleepable, .. } => { + let mut data = + ProgramData::new(prog_name, obj, btf_fd, verifier_log_level); + if *sleepable { + data.flags = BPF_F_SLEEPABLE; + } + Program::Lsm(Lsm { data }) + } ProgramSection::BtfTracePoint { .. } => { Program::BtfTracePoint(BtfTracePoint { data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), diff --git a/aya/src/maps/array/array.rs b/aya/src/maps/array/array.rs index 9344f192..71ac1223 100644 --- a/aya/src/maps/array/array.rs +++ b/aya/src/maps/array/array.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -35,9 +34,9 @@ pub struct Array { _v: PhantomData, } -impl, V: Pod> Array { +impl, V: Pod> Array { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -52,7 +51,7 @@ impl, V: Pod> Array { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn len(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } /// Returns the value stored at the given index. @@ -62,7 +61,7 @@ impl, V: Pod> Array { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_lookup_elem` fails. pub fn get(&self, index: &u32, flags: u64) -> Result { - let data = self.inner.as_ref(); + let data = self.inner.borrow(); check_bounds(data, *index)?; let fd = data.fd_or_err()?; @@ -82,7 +81,7 @@ impl, V: Pod> Array { } } -impl, V: Pod> Array { +impl, V: Pod> Array { /// Sets the value of the element at the given index. /// /// # Errors @@ -90,7 +89,7 @@ impl, V: Pod> Array { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_update_elem` fails. 
pub fn set(&mut self, index: u32, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| { @@ -103,9 +102,9 @@ impl, V: Pod> Array { } } -impl, V: Pod> IterableMap for Array { +impl, V: Pod> IterableMap for Array { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result { diff --git a/aya/src/maps/array/per_cpu_array.rs b/aya/src/maps/array/per_cpu_array.rs index a3b5eab9..e83f59e5 100644 --- a/aya/src/maps/array/per_cpu_array.rs +++ b/aya/src/maps/array/per_cpu_array.rs @@ -1,5 +1,5 @@ use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -53,9 +53,9 @@ pub struct PerCpuArray { _v: PhantomData, } -impl, V: Pod> PerCpuArray { +impl, V: Pod> PerCpuArray { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -70,7 +70,7 @@ impl, V: Pod> PerCpuArray { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn len(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } /// Returns a slice of values - one for each CPU - stored at the given index. @@ -80,7 +80,7 @@ impl, V: Pod> PerCpuArray { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_lookup_elem` fails. pub fn get(&self, index: &u32, flags: u64) -> Result, MapError> { - let data = self.inner.as_ref(); + let data = self.inner.borrow(); check_bounds(data, *index)?; let fd = data.fd_or_err()?; @@ -100,7 +100,7 @@ impl, V: Pod> PerCpuArray { } } -impl, V: Pod> PerCpuArray { +impl, V: Pod> PerCpuArray { /// Sets the values - one for each CPU - at the given index. /// /// # Errors @@ -108,7 +108,7 @@ impl, V: Pod> PerCpuArray { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_update_elem` fails. pub fn set(&mut self, index: u32, values: PerCpuValues, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; @@ -122,9 +122,9 @@ impl, V: Pod> PerCpuArray { } } -impl, V: Pod> IterableMap> for PerCpuArray { +impl, V: Pod> IterableMap> for PerCpuArray { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result, MapError> { diff --git a/aya/src/maps/array/program_array.rs b/aya/src/maps/array/program_array.rs index 27566e28..b8a57cf5 100644 --- a/aya/src/maps/array/program_array.rs +++ b/aya/src/maps/array/program_array.rs @@ -1,7 +1,7 @@ //! An array of eBPF program file descriptors used as a jump table. use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, os::unix::prelude::{AsRawFd, RawFd}, }; @@ -51,9 +51,9 @@ pub struct ProgramArray { inner: T, } -impl> ProgramArray { +impl> ProgramArray { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -64,17 +64,17 @@ impl> ProgramArray { /// An iterator over the indices of the array that point to a program. The iterator item type /// is `Result`. 
pub fn indices(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl> ProgramArray { +impl> ProgramArray { /// Sets the target program file descriptor for the given index in the jump table. /// /// When an eBPF program calls `bpf_tail_call(ctx, prog_array, index)`, control /// flow will jump to `program`. pub fn set(&mut self, index: u32, program: ProgramFd, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; let prog_fd = program.as_raw_fd(); @@ -93,9 +93,9 @@ impl> ProgramArray { /// Calling `bpf_tail_call(ctx, prog_array, index)` on an index that has been cleared returns an /// error. pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, *index)?; - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow_mut().fd_or_err()?; bpf_map_delete_elem(fd, index) .map(|_| ()) diff --git a/aya/src/maps/bloom_filter.rs b/aya/src/maps/bloom_filter.rs index a58b9b44..f7f4be02 100644 --- a/aya/src/maps/bloom_filter.rs +++ b/aya/src/maps/bloom_filter.rs @@ -1,5 +1,5 @@ //! A Bloom Filter. -use std::{borrow::Borrow, convert::AsRef, marker::PhantomData}; +use std::{borrow::Borrow, marker::PhantomData}; use crate::{ maps::{check_v_size, MapData, MapError}, @@ -35,9 +35,9 @@ pub struct BloomFilter { _v: PhantomData, } -impl, V: Pod> BloomFilter { +impl, V: Pod> BloomFilter { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_v_size::(data)?; let _ = data.fd_or_err()?; @@ -50,7 +50,7 @@ impl, V: Pod> BloomFilter { /// Query the existence of the element. pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_lookup_elem_ptr::(fd, None, &mut value, flags) .map_err(|(_, io_error)| MapError::SyscallError { @@ -63,7 +63,7 @@ impl, V: Pod> BloomFilter { /// Inserts a value into the map. pub fn insert(&self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_push_elem".to_owned(), diff --git a/aya/src/maps/hash_map/hash_map.rs b/aya/src/maps/hash_map/hash_map.rs index 14f5e73f..26a4af0f 100644 --- a/aya/src/maps/hash_map/hash_map.rs +++ b/aya/src/maps/hash_map/hash_map.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -39,9 +38,9 @@ pub struct HashMap { _v: PhantomData, } -impl, K: Pod, V: Pod> HashMap { +impl, K: Pod, V: Pod> HashMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -54,7 +53,7 @@ impl, K: Pod, V: Pod> HashMap { /// Returns a copy of the value associated with the key. 
pub fn get(&self, key: &K, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -73,11 +72,11 @@ impl, K: Pod, V: Pod> HashMap { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl, K: Pod, V: Pod> HashMap { +impl, K: Pod, V: Pod> HashMap { /// Inserts a key-value pair into the map. pub fn insert( &mut self, @@ -85,18 +84,18 @@ impl, K: Pod, V: Pod> HashMap { value: impl Borrow, flags: u64, ) -> Result<(), MapError> { - hash_map::insert(self.inner.as_mut(), key.borrow(), value.borrow(), flags) + hash_map::insert(self.inner.borrow_mut(), key.borrow(), value.borrow(), flags) } /// Removes a key from the map. pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod, V: Pod> IterableMap for HashMap { +impl, K: Pod, V: Pod> IterableMap for HashMap { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result { diff --git a/aya/src/maps/hash_map/per_cpu_hash_map.rs b/aya/src/maps/hash_map/per_cpu_hash_map.rs index eb7fe6ae..eea8ed01 100644 --- a/aya/src/maps/hash_map/per_cpu_hash_map.rs +++ b/aya/src/maps/hash_map/per_cpu_hash_map.rs @@ -1,7 +1,6 @@ //! Per-CPU hash map. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -48,9 +47,9 @@ pub struct PerCpuHashMap { _v: PhantomData, } -impl, K: Pod, V: Pod> PerCpuHashMap { +impl, K: Pod, V: Pod> PerCpuHashMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -64,7 +63,7 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// Returns a slice of values - one for each CPU - associated with the key. pub fn get(&self, key: &K, flags: u64) -> Result, MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let values = bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -83,11 +82,11 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl, K: Pod, V: Pod> PerCpuHashMap { +impl, K: Pod, V: Pod> PerCpuHashMap { /// Inserts a slice of values - one for each CPU - for the given key. /// /// # Examples @@ -122,7 +121,7 @@ impl, K: Pod, V: Pod> PerCpuHashMap { values: PerCpuValues, flags: u64, ) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow_mut().fd_or_err()?; bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err( |(_, io_error)| MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), @@ -135,13 +134,15 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// Removes a key from the map. 
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod, V: Pod> IterableMap> for PerCpuHashMap { +impl, K: Pod, V: Pod> IterableMap> + for PerCpuHashMap +{ fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result, MapError> { diff --git a/aya/src/maps/lpm_trie.rs b/aya/src/maps/lpm_trie.rs index d8f3b66b..95894a65 100644 --- a/aya/src/maps/lpm_trie.rs +++ b/aya/src/maps/lpm_trie.rs @@ -1,7 +1,6 @@ //! A LPM Trie. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -99,9 +98,9 @@ impl Clone for Key { // A Pod impl is required as Key struct is a key for a map. unsafe impl Pod for Key {} -impl, K: Pod, V: Pod> LpmTrie { +impl, K: Pod, V: Pod> LpmTrie { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::, V>(data)?; let _ = data.fd_or_err()?; @@ -115,7 +114,7 @@ impl, K: Pod, V: Pod> LpmTrie { /// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie. pub fn get(&self, key: &Key, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -134,17 +133,17 @@ impl, K: Pod, V: Pod> LpmTrie { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result, MapError>`. pub fn keys(&self) -> MapKeys<'_, Key> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// An iterator visiting all keys matching key. The /// iterator item type is `Result, MapError>`. pub fn iter_key(&self, key: Key) -> LpmTrieKeys<'_, K> { - LpmTrieKeys::new(self.inner.as_ref(), key) + LpmTrieKeys::new(self.inner.borrow(), key) } } -impl, K: Pod, V: Pod> LpmTrie { +impl, K: Pod, V: Pod> LpmTrie { /// Inserts a key value pair into the map. pub fn insert( &mut self, @@ -152,7 +151,7 @@ impl, K: Pod, V: Pod> LpmTrie { value: impl Borrow, flags: u64, ) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), @@ -167,7 +166,7 @@ impl, K: Pod, V: Pod> LpmTrie { /// /// Both the prefix and data must match exactly - this method does not do a longest prefix match. pub fn remove(&mut self, key: &Key) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_delete_elem(fd, key) .map(|_| ()) .map_err(|(_, io_error)| MapError::SyscallError { @@ -177,9 +176,9 @@ impl, K: Pod, V: Pod> LpmTrie { } } -impl, K: Pod, V: Pod> IterableMap, V> for LpmTrie { +impl, K: Pod, V: Pod> IterableMap, V> for LpmTrie { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &Key) -> Result { diff --git a/aya/src/maps/mod.rs b/aya/src/maps/mod.rs index 271cbab8..fa27c053 100644 --- a/aya/src/maps/mod.rs +++ b/aya/src/maps/mod.rs @@ -37,7 +37,6 @@ //! versa. Because of that, all map values must be plain old data and therefore //! implement the [Pod] trait. 
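The map changes above all swap `AsRef<MapData>`/`AsMut<MapData>` bounds for `Borrow<MapData>`/`BorrowMut<MapData>`. The payoff is that the standard library's blanket `impl<T> Borrow<T> for T` already covers an owned `MapData`, so the hand-written `AsRef`/`AsMut` impls can be dropped while both owned and borrowed map handles keep working. Below is a minimal sketch of what the new bound buys callers; the helper name, the `u32` value type and the `"COUNTERS"` map name are invented for illustration and are not part of the diff.

```rust
use std::borrow::Borrow;

use aya::maps::{Array, MapData};

// Accepts Array<MapData, u32> (owned) and Array<&MapData, u32> (borrowed)
// alike: `MapData` satisfies Borrow<MapData> via the blanket impl, and
// `&MapData` via `impl<T: ?Sized> Borrow<T> for &T`.
fn array_capacity<T: Borrow<MapData>>(array: &Array<T, u32>) -> u32 {
    array.len()
}
```

In practice the borrowed form should come from something like `Array::try_from(bpf.map("COUNTERS").unwrap())` and the owned form from `Array::try_from(bpf.take_map("COUNTERS").unwrap())`, though those conversion paths are outside this diff.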
use std::{ - convert::{AsMut, AsRef}, ffi::CString, fmt, io, marker::PhantomData, @@ -481,18 +480,6 @@ pub struct MapData { pub pinned: bool, } -impl AsRef for MapData { - fn as_ref(&self) -> &MapData { - self - } -} - -impl AsMut for MapData { - fn as_mut(&mut self) -> &mut MapData { - self - } -} - impl MapData { /// Creates a new map with the provided `name` pub fn create(&mut self, name: &str) -> Result { diff --git a/aya/src/maps/perf/async_perf_event_array.rs b/aya/src/maps/perf/async_perf_event_array.rs index 39ffcfa1..7e55889a 100644 --- a/aya/src/maps/perf/async_perf_event_array.rs +++ b/aya/src/maps/perf/async_perf_event_array.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use std::{ - convert::AsMut, + borrow::{Borrow, BorrowMut}, os::unix::prelude::{AsRawFd, RawFd}, }; @@ -89,7 +89,7 @@ pub struct AsyncPerfEventArray { perf_map: PerfEventArray, } -impl + AsRef> AsyncPerfEventArray { +impl + Borrow> AsyncPerfEventArray { /// Opens the perf buffer at the given index. /// /// The returned buffer will receive all the events eBPF programs send at the given index. @@ -112,7 +112,7 @@ impl + AsRef> AsyncPerfEventArray { } } -impl> AsyncPerfEventArray { +impl> AsyncPerfEventArray { pub(crate) fn new(map: T) -> Result, MapError> { Ok(AsyncPerfEventArray { perf_map: PerfEventArray::new(map)?, @@ -138,7 +138,7 @@ pub struct AsyncPerfEventArrayBuffer { } #[cfg(any(feature = "async_tokio"))] -impl + AsRef> AsyncPerfEventArrayBuffer { +impl + Borrow> AsyncPerfEventArrayBuffer { /// Reads events from the buffer. /// /// This method reads events into the provided slice of buffers, filling @@ -168,7 +168,7 @@ impl + AsRef> AsyncPerfEventArrayBuffer { } #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] -impl + AsRef> AsyncPerfEventArrayBuffer { +impl + Borrow> AsyncPerfEventArrayBuffer { /// Reads events from the buffer. /// /// This method reads events into the provided slice of buffers, filling diff --git a/aya/src/maps/perf/perf_event_array.rs b/aya/src/maps/perf/perf_event_array.rs index 8dafd7f1..c241a37b 100644 --- a/aya/src/maps/perf/perf_event_array.rs +++ b/aya/src/maps/perf/perf_event_array.rs @@ -2,7 +2,7 @@ //! //! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page. use std::{ - convert::AsMut, + borrow::{Borrow, BorrowMut}, ops::Deref, os::unix::io::{AsRawFd, RawFd}, sync::Arc, @@ -31,7 +31,7 @@ pub struct PerfEventArrayBuffer { buf: PerfBuffer, } -impl + AsRef> PerfEventArrayBuffer { +impl + Borrow> PerfEventArrayBuffer { /// Returns true if the buffer contains events that haven't been read. 
pub fn readable(&self) -> bool { self.buf.readable() @@ -55,7 +55,7 @@ impl + AsRef> PerfEventArrayBuffer { } } -impl + AsRef> AsRawFd for PerfEventArrayBuffer { +impl + Borrow> AsRawFd for PerfEventArrayBuffer { fn as_raw_fd(&self) -> RawFd { self.buf.as_raw_fd() } @@ -84,14 +84,14 @@ impl + AsRef> AsRawFd for PerfEventArrayBuffer { /// ```no_run /// # use aya::maps::perf::PerfEventArrayBuffer; /// # use aya::maps::MapData; -/// # use std::convert::AsMut; +/// # use std::borrow::BorrowMut; /// # struct Poll { _t: std::marker::PhantomData }; -/// # impl> Poll { +/// # impl> Poll { /// # fn poll_readable(&self) -> &mut [PerfEventArrayBuffer] { /// # &mut [] /// # } /// # } -/// # fn poll_buffers>(bufs: Vec>) -> Poll { +/// # fn poll_buffers>(bufs: Vec>) -> Poll { /// # Poll { _t: std::marker::PhantomData } /// # } /// # #[derive(thiserror::Error, Debug)] @@ -160,9 +160,9 @@ pub struct PerfEventArray { page_size: usize, } -impl> PerfEventArray { +impl> PerfEventArray { pub(crate) fn new(map: T) -> Result, MapError> { - let _fd = map.as_ref().fd_or_err()?; + let _fd = map.borrow().fd_or_err()?; Ok(PerfEventArray { map: Arc::new(map), @@ -171,7 +171,7 @@ impl> PerfEventArray { } } -impl + AsRef> PerfEventArray { +impl + Borrow> PerfEventArray { /// Opens the perf buffer at the given index. /// /// The returned buffer will receive all the events eBPF programs send at the given index. @@ -183,7 +183,7 @@ impl + AsRef> PerfEventArray { // FIXME: keep track of open buffers // this cannot fail as new() checks that the fd is open - let map_data: &MapData = self.map.deref().as_ref(); + let map_data: &MapData = self.map.deref().borrow(); let map_fd = map_data.fd_or_err().unwrap(); let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?; bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0) diff --git a/aya/src/maps/queue.rs b/aya/src/maps/queue.rs index d10ecd04..c6810546 100644 --- a/aya/src/maps/queue.rs +++ b/aya/src/maps/queue.rs @@ -1,7 +1,6 @@ //! A FIFO queue. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -34,9 +33,9 @@ pub struct Queue { _v: PhantomData, } -impl, V: Pod> Queue { +impl, V: Pod> Queue { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::<(), V>(data)?; let _fd = data.fd_or_err()?; @@ -51,11 +50,11 @@ impl, V: Pod> Queue { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn capacity(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } } -impl, V: Pod> Queue { +impl, V: Pod> Queue { /// Removes the first element and returns it. /// /// # Errors @@ -63,7 +62,7 @@ impl, V: Pod> Queue { /// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`] /// if `bpf_map_lookup_and_delete_elem` fails. pub fn pop(&mut self, flags: u64) -> Result { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_and_delete_elem::(fd, None, flags).map_err( |(_, io_error)| MapError::SyscallError { @@ -80,7 +79,7 @@ impl, V: Pod> Queue { /// /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. 
pub fn push(&mut self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_push_elem".to_owned(), diff --git a/aya/src/maps/sock/sock_hash.rs b/aya/src/maps/sock/sock_hash.rs index 7da2d097..244dcd1c 100644 --- a/aya/src/maps/sock/sock_hash.rs +++ b/aya/src/maps/sock/sock_hash.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, os::unix::io::{AsRawFd, RawFd}, }; @@ -69,9 +68,9 @@ pub struct SockHash { _k: PhantomData, } -impl, K: Pod> SockHash { +impl, K: Pod> SockHash { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -83,7 +82,7 @@ impl, K: Pod> SockHash { /// Returns the fd of the socket stored at the given key. pub fn get(&self, key: &K, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -102,7 +101,7 @@ impl, K: Pod> SockHash { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// Returns the map's file descriptor. @@ -110,11 +109,11 @@ impl, K: Pod> SockHash { /// The returned file descriptor can be used to attach programs that work with /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). pub fn fd(&self) -> Result { - Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) + Ok(SockMapFd(self.inner.borrow().fd_or_err()?)) } } -impl, K: Pod> SockHash { +impl, K: Pod> SockHash { /// Inserts a socket under the given key. pub fn insert( &mut self, @@ -122,18 +121,23 @@ impl, K: Pod> SockHash { value: I, flags: u64, ) -> Result<(), MapError> { - hash_map::insert(self.inner.as_mut(), key.borrow(), &value.as_raw_fd(), flags) + hash_map::insert( + self.inner.borrow_mut(), + key.borrow(), + &value.as_raw_fd(), + flags, + ) } /// Removes a socket from the map. pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod> IterableMap for SockHash { +impl, K: Pod> IterableMap for SockHash { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result { diff --git a/aya/src/maps/sock/sock_map.rs b/aya/src/maps/sock/sock_map.rs index 28c5b404..32625b41 100644 --- a/aya/src/maps/sock/sock_map.rs +++ b/aya/src/maps/sock/sock_map.rs @@ -1,7 +1,7 @@ //! An array of eBPF program file descriptors used as a jump table. use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, os::unix::{io::AsRawFd, prelude::RawFd}, }; @@ -44,9 +44,9 @@ pub struct SockMap { pub(crate) inner: T, } -impl> SockMap { +impl> SockMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -57,7 +57,7 @@ impl> SockMap { /// An iterator over the indices of the array that point to a program. The iterator item type /// is `Result`. 
pub fn indices(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// Returns the map's file descriptor. @@ -65,14 +65,14 @@ impl> SockMap { /// The returned file descriptor can be used to attach programs that work with /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). pub fn fd(&self) -> Result { - Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) + Ok(SockMapFd(self.inner.borrow().fd_or_err()?)) } } -impl> SockMap { +impl> SockMap { /// Stores a socket into the map. pub fn set(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); let fd = data.fd_or_err()?; check_bounds(data, index)?; bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err( @@ -86,7 +86,7 @@ impl> SockMap { /// Removes the socket stored at `index` from the map. pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); let fd = data.fd_or_err()?; check_bounds(data, *index)?; bpf_map_delete_elem(fd, index) diff --git a/aya/src/maps/stack.rs b/aya/src/maps/stack.rs index c0742cfa..db428ddf 100644 --- a/aya/src/maps/stack.rs +++ b/aya/src/maps/stack.rs @@ -1,7 +1,6 @@ //! A LIFO stack. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -34,9 +33,9 @@ pub struct Stack { _v: PhantomData, } -impl, V: Pod> Stack { +impl, V: Pod> Stack { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::<(), V>(data)?; let _fd = data.fd_or_err()?; @@ -51,11 +50,11 @@ impl, V: Pod> Stack { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn capacity(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } } -impl, V: Pod> Stack { +impl, V: Pod> Stack { /// Removes the last element and returns it. /// /// # Errors @@ -63,7 +62,7 @@ impl, V: Pod> Stack { /// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`] /// if `bpf_map_lookup_and_delete_elem` fails. pub fn pop(&mut self, flags: u64) -> Result { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_and_delete_elem::(fd, None, flags).map_err( |(_, io_error)| MapError::SyscallError { @@ -80,7 +79,7 @@ impl, V: Pod> Stack { /// /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. pub fn push(&mut self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), diff --git a/aya/src/maps/stack_trace.rs b/aya/src/maps/stack_trace.rs index 135387e4..a8cce414 100644 --- a/aya/src/maps/stack_trace.rs +++ b/aya/src/maps/stack_trace.rs @@ -1,7 +1,7 @@ //! A hash map of kernel or user space stack traces. //! //! See [`StackTraceMap`] for documentation and examples. 
-use std::{collections::BTreeMap, convert::AsRef, fs, io, mem, path::Path, str::FromStr}; +use std::{borrow::Borrow, collections::BTreeMap, fs, io, mem, path::Path, str::FromStr}; use crate::{ maps::{IterableMap, MapData, MapError, MapIter, MapKeys}, @@ -67,9 +67,9 @@ pub struct StackTraceMap { max_stack_depth: usize, } -impl> StackTraceMap { +impl> StackTraceMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); let expected = mem::size_of::(); let size = data.obj.key_size() as usize; if size != expected { @@ -102,7 +102,7 @@ impl> StackTraceMap { /// Returns [`MapError::KeyNotFound`] if there is no stack trace with the /// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails. pub fn get(&self, stack_id: &u32, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let mut frames = vec![0; self.max_stack_depth]; bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags) @@ -136,13 +136,13 @@ impl> StackTraceMap { /// An iterator visiting all the stack_ids in arbitrary order. The iterator element /// type is `Result`. pub fn stack_ids(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl> IterableMap for StackTraceMap { +impl> IterableMap for StackTraceMap { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result { @@ -150,7 +150,7 @@ impl> IterableMap for StackTraceMap { } } -impl<'a, T: AsRef> IntoIterator for &'a StackTraceMap { +impl<'a, T: Borrow> IntoIterator for &'a StackTraceMap { type Item = Result<(u32, StackTrace), MapError>; type IntoIter = MapIter<'a, u32, StackTrace, StackTraceMap>; diff --git a/aya/src/programs/xdp.rs b/aya/src/programs/xdp.rs index 63f00987..1fad9da1 100644 --- a/aya/src/programs/xdp.rs +++ b/aya/src/programs/xdp.rs @@ -35,7 +35,7 @@ pub enum XdpError { bitflags! { /// Flags passed to [`Xdp::attach()`]. - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default)] pub struct XdpFlags: u32 { /// Skb mode. 
const SKB_MODE = XDP_FLAGS_SKB_MODE; @@ -128,7 +128,7 @@ impl Xdp { let k_ver = kernel_version().unwrap(); if k_ver >= (5, 9, 0) { - let link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, None, flags.bits).map_err( + let link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, None, flags.bits()).map_err( |(_, io_error)| ProgramError::SyscallError { call: "bpf_link_create".to_owned(), io_error, @@ -138,7 +138,7 @@ impl Xdp { .links .insert(XdpLink::new(XdpLinkInner::FdLink(FdLink::new(link_fd)))) } else { - unsafe { netlink_set_xdp_fd(if_index, prog_fd, None, flags.bits) } + unsafe { netlink_set_xdp_fd(if_index, prog_fd, None, flags.bits()) } .map_err(|io_error| XdpError::NetlinkError { io_error })?; self.data @@ -226,9 +226,9 @@ impl Link for NlLink { fn detach(self) -> Result<(), ProgramError> { let k_ver = kernel_version().unwrap(); let flags = if k_ver >= (5, 7, 0) { - self.flags.bits | XDP_FLAGS_REPLACE + self.flags.bits() | XDP_FLAGS_REPLACE } else { - self.flags.bits + self.flags.bits() }; let _ = unsafe { netlink_set_xdp_fd(self.if_index, -1, Some(self.prog_fd), flags) }; Ok(()) diff --git a/aya/src/sys/bpf.rs b/aya/src/sys/bpf.rs index 101823d7..368ef4a4 100644 --- a/aya/src/sys/bpf.rs +++ b/aya/src/sys/bpf.rs @@ -8,13 +8,17 @@ use std::{ }; use libc::{c_char, c_long, close, ENOENT, ENOSPC}; +use obj::{ + maps::{bpf_map_def, LegacyMap}, + BpfSectionKind, +}; use crate::{ generated::{ bpf_attach_type, bpf_attr, bpf_btf_info, bpf_cmd, bpf_insn, bpf_link_info, bpf_map_info, bpf_map_type, bpf_prog_info, bpf_prog_type, BPF_F_REPLACE, }, - maps::PerCpuValues, + maps::{MapData, PerCpuValues}, obj::{ self, btf::{ @@ -66,7 +70,7 @@ pub(crate) fn bpf_create_map(name: &CStr, def: &obj::Map, btf_fd: Option) _ => { u.btf_key_type_id = m.def.btf_key_type_id; u.btf_value_type_id = m.def.btf_value_type_id; - u.btf_fd = btf_fd.unwrap() as u32; + u.btf_fd = btf_fd.unwrap_or_default() as u32; } } } @@ -599,6 +603,37 @@ pub(crate) fn is_prog_name_supported() -> bool { } } +pub(crate) fn is_probe_read_kernel_supported() -> bool { + let mut attr = unsafe { mem::zeroed::() }; + let u = unsafe { &mut attr.__bindgen_anon_3 }; + + let prog: &[u8] = &[ + 0xbf, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = r10 + 0x07, 0x01, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, // r1 -= 8 + 0xb7, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // r2 = 8 + 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0 + 0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, // call 113 + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit + ]; + + let gpl = b"GPL\0"; + u.license = gpl.as_ptr() as u64; + + let insns = copy_instructions(prog).unwrap(); + u.insn_cnt = insns.len() as u32; + u.insns = insns.as_ptr() as u64; + u.prog_type = bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT as u32; + + match sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) { + Ok(v) => { + let fd = v as RawFd; + unsafe { close(fd) }; + true + } + Err(_) => false, + } +} + pub(crate) fn is_perf_link_supported() -> bool { let mut attr = unsafe { mem::zeroed::() }; let u = unsafe { &mut attr.__bindgen_anon_3 }; @@ -630,6 +665,60 @@ pub(crate) fn is_perf_link_supported() -> bool { false } +pub(crate) fn is_bpf_global_data_supported() -> bool { + let mut attr = unsafe { mem::zeroed::() }; + let u = unsafe { &mut attr.__bindgen_anon_3 }; + + let prog: &[u8] = &[ + 0x18, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ld_pseudo r1, 0x2, 0x0 + 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, // + 0x7a, 0x01, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, // stdw [r1 + 0x0], 0x2a + 0xb7, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, // mov64 r0 = 0 + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit + ]; + + let mut insns = copy_instructions(prog).unwrap(); + + let mut map_data = MapData { + obj: obj::Map::Legacy(LegacyMap { + def: bpf_map_def { + map_type: bpf_map_type::BPF_MAP_TYPE_ARRAY as u32, + key_size: 4, + value_size: 32, + max_entries: 1, + ..Default::default() + }, + section_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, + data: Vec::new(), + }), + fd: None, + pinned: false, + btf_fd: None, + }; + + if let Ok(map_fd) = map_data.create("aya_global") { + insns[0].imm = map_fd; + + let gpl = b"GPL\0"; + u.license = gpl.as_ptr() as u64; + u.insn_cnt = insns.len() as u32; + u.insns = insns.as_ptr() as u64; + u.prog_type = bpf_prog_type::BPF_PROG_TYPE_SOCKET_FILTER as u32; + + if let Ok(v) = sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) { + let fd = v as RawFd; + + unsafe { close(fd) }; + + return true; + } + } + + false +} + pub(crate) fn is_btf_supported() -> bool { let mut btf = Btf::new(); let name_offset = btf.add_string("int".to_string()); diff --git a/aya/src/sys/mod.rs b/aya/src/sys/mod.rs index 129889c5..294ff386 100644 --- a/aya/src/sys/mod.rs +++ b/aya/src/sys/mod.rs @@ -8,6 +8,8 @@ mod fake; use std::io; #[cfg(not(test))] use std::{ffi::CString, mem}; +#[cfg(not(test))] +use std::{fs::File, io::Read}; #[cfg(not(test))] use libc::utsname; @@ -82,8 +84,40 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { Ok((0xff, 0xff, 0xff)) } +#[cfg(not(test))] +fn ubuntu_kernel_version() -> Result<(u32, u32, u32), ()> { + if let Ok(mut file) = File::open("/proc/version_signature") { + let mut buf = String::new(); + let mut major = 0u32; + let mut minor = 0u32; + let mut patch = 0u32; + let format = CString::new("%*s %*s %u.%u.%u\n").unwrap(); + + file.read_to_string(&mut buf).map_err(|_| ())?; + + unsafe { + if libc::sscanf( + buf.as_ptr() as *const _, + format.as_ptr(), + &mut major as *mut u32, + &mut minor as *mut _, + &mut patch as *mut _, + ) == 3 + { + return Ok((major, minor, patch)); + } + } + } + + Err(()) +} + #[cfg(not(test))] pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { + if let Ok(version) = ubuntu_kernel_version() { + return Ok(version); + } + unsafe { let mut v = mem::zeroed::(); if libc::uname(&mut v as *mut _) != 0 { @@ -93,6 +127,33 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { let mut major = 0u32; let mut minor = 0u32; let mut patch = 0u32; + + let debian_marker = CString::new("Debian").unwrap(); + + let p = libc::strstr(v.version.as_ptr(), debian_marker.as_ptr()); + + if !p.is_null() { + let debian_format = CString::new("Debian %u.%u.%u").map_err(|_| ())?; + + if libc::sscanf( + p, + debian_format.as_ptr(), + &mut major as *mut u32, + &mut minor as *mut _, + &mut patch as *mut _, + ) == 3 + { + // On Debian 10, kernels after 4.19.229 expect 4.19.255 due to broken Makefile patches. 
+ let patch_level_limit = if major == 4 && minor == 19 { 230 } else { 255 }; + + if patch >= patch_level_limit { + patch = 255; + } + + return Ok((major, minor, patch)); + } + } + let format = CString::new("%u.%u.%u").unwrap(); if libc::sscanf( v.release.as_ptr(), diff --git a/bpf/aya-bpf/src/programs/sk_buff.rs b/bpf/aya-bpf/src/programs/sk_buff.rs index 669c2c95..ab7f5038 100644 --- a/bpf/aya-bpf/src/programs/sk_buff.rs +++ b/bpf/aya-bpf/src/programs/sk_buff.rs @@ -6,8 +6,8 @@ use core::{ use aya_bpf_bindings::helpers::{ bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace, - bpf_skb_adjust_room, bpf_skb_change_type, bpf_skb_load_bytes, bpf_skb_pull_data, - bpf_skb_store_bytes, + bpf_skb_adjust_room, bpf_skb_change_proto, bpf_skb_change_type, bpf_skb_load_bytes, + bpf_skb_pull_data, bpf_skb_store_bytes, }; use aya_bpf_cty::c_long; @@ -189,6 +189,16 @@ impl SkBuff { } } + #[inline] + pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> { + let ret = unsafe { bpf_skb_change_proto(self.as_ptr() as *mut _, proto, flags) }; + if ret == 0 { + Ok(()) + } else { + Err(ret) + } + } + #[inline] pub fn change_type(&self, ty: u32) -> Result<(), c_long> { let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) }; diff --git a/bpf/aya-bpf/src/programs/tc.rs b/bpf/aya-bpf/src/programs/tc.rs index 1d3133d5..5aa0a173 100644 --- a/bpf/aya-bpf/src/programs/tc.rs +++ b/bpf/aya-bpf/src/programs/tc.rs @@ -142,6 +142,11 @@ impl TcContext { self.skb.clone_redirect(if_index, flags) } + #[inline] + pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> { + self.skb.change_proto(proto, flags) + } + #[inline] pub fn change_type(&self, ty: u32) -> Result<(), c_long> { self.skb.change_type(ty) diff --git a/test/integration-ebpf/Cargo.toml b/test/integration-ebpf/Cargo.toml index 69c60316..0683143a 100644 --- a/test/integration-ebpf/Cargo.toml +++ b/test/integration-ebpf/Cargo.toml @@ -6,6 +6,11 @@ publish = false [dependencies] aya-bpf = { path = "../../bpf/aya-bpf" } +aya-log-ebpf = { path = "../../bpf/aya-log-ebpf" } + +[[bin]] +name = "log" +path = "src/log.rs" [[bin]] name = "map_test" diff --git a/test/integration-ebpf/src/log.rs b/test/integration-ebpf/src/log.rs new file mode 100644 index 00000000..7192d5c7 --- /dev/null +++ b/test/integration-ebpf/src/log.rs @@ -0,0 +1,27 @@ +#![no_std] +#![no_main] + +use aya_bpf::{macros::uprobe, programs::ProbeContext}; +use aya_log_ebpf::{debug, error, info, trace, warn}; + +#[uprobe] +pub fn test_log(ctx: ProbeContext) { + debug!(&ctx, "Hello from eBPF!"); + error!(&ctx, "{}, {}, {}", 69, 420i32, "wao"); + let ipv4 = 167772161u32; // 10.0.0.1 + let ipv6 = [ + 32u8, 1u8, 13u8, 184u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8, + ]; // 2001:db8::1 + info!(&ctx, "ipv4: {:ipv4}, ipv6: {:ipv6}", ipv4, ipv6); + let mac = [4u8, 32u8, 6u8, 9u8, 0u8, 64u8]; + trace!(&ctx, "mac lc: {:mac}, mac uc: {:MAC}", mac, mac); + let hex = 0x2f; + warn!(&ctx, "hex lc: {:x}, hex uc: {:X}", hex, hex); + let hex = [0xde, 0xad, 0xbe, 0xef].as_slice(); + debug!(&ctx, "hex lc: {:x}, hex uc: {:X}", hex, hex); +} + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! 
{ + unsafe { core::hint::unreachable_unchecked() } +} diff --git a/test/integration-test-macros/Cargo.toml b/test/integration-test-macros/Cargo.toml index e28cdc5c..f66b75a8 100644 --- a/test/integration-test-macros/Cargo.toml +++ b/test/integration-test-macros/Cargo.toml @@ -6,7 +6,8 @@ publish = false [dependencies] quote = "1" -syn = {version = "1.0", features = ["full"]} +proc-macro2 = "1.0" +syn = {version = "2.0", features = ["full"]} [lib] proc-macro = true diff --git a/test/integration-test-macros/src/lib.rs b/test/integration-test-macros/src/lib.rs index 297159ed..9818b7f5 100644 --- a/test/integration-test-macros/src/lib.rs +++ b/test/integration-test-macros/src/lib.rs @@ -1,6 +1,7 @@ use proc_macro::TokenStream; +use proc_macro2::Span; use quote::quote; -use syn::{parse_macro_input, ItemFn}; +use syn::{parse_macro_input, Ident, ItemFn}; #[proc_macro_attribute] pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream { @@ -17,3 +18,29 @@ pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream { }; TokenStream::from(expanded) } + +#[proc_macro_attribute] +pub fn tokio_integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream { + let item = parse_macro_input!(item as ItemFn); + let name = &item.sig.ident; + let name_str = &item.sig.ident.to_string(); + let sync_name_str = format!("sync_{name_str}"); + let sync_name = Ident::new(&sync_name_str, Span::call_site()); + let expanded = quote! { + #item + + fn #sync_name() { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(#name()); + } + + inventory::submit!(crate::IntegrationTest { + name: concat!(module_path!(), "::", #sync_name_str), + test_fn: #sync_name, + }); + }; + TokenStream::from(expanded) +} diff --git a/test/integration-test/Cargo.toml b/test/integration-test/Cargo.toml index bfcc5d0b..96a3f793 100644 --- a/test/integration-test/Cargo.toml +++ b/test/integration-test/Cargo.toml @@ -7,16 +7,19 @@ publish = false [dependencies] anyhow = "1" aya = { path = "../../aya" } +aya-log = { path = "../../aya-log" } aya-obj = { path = "../../aya-obj" } clap = { version = "4", features = ["derive"] } env_logger = "0.10" +futures-core = "0.3" inventory = "0.3" integration-test-macros = { path = "../integration-test-macros" } lazy_static = "1" libc = { version = "0.2.105" } log = "0.4" -object = { version = "0.30", default-features = false, features = ["std", "read_core", "elf"] } +object = { version = "0.31", default-features = false, features = ["std", "read_core", "elf"] } rbpf = "0.1.0" regex = "1" tempfile = "3.3.0" libtest-mimic = "0.6.0" +tokio = { version = "1.24", features = ["rt", "rt-multi-thread", "sync", "time"] } diff --git a/test/integration-test/src/tests/log.rs b/test/integration-test/src/tests/log.rs new file mode 100644 index 00000000..811952a3 --- /dev/null +++ b/test/integration-test/src/tests/log.rs @@ -0,0 +1,140 @@ +use std::sync::{Arc, LockResult, Mutex, MutexGuard}; + +use aya::{include_bytes_aligned, programs::UProbe, Bpf}; +use aya_log::BpfLogger; +use log::{Level, Log, Record}; +use tokio::time::{sleep, Duration}; + +use super::tokio_integration_test; + +const MAX_ATTEMPTS: usize = 10; +const TIMEOUT_MS: u64 = 10; + +#[no_mangle] +#[inline(never)] +pub extern "C" fn trigger_ebpf_program() {} + +struct CapturedLogs(Arc>>); + +impl CapturedLogs { + fn with_capacity(capacity: usize) -> Self { + Self(Arc::new(Mutex::new(Vec::with_capacity(capacity)))) + } + + fn clone(&self) -> Self { + 
Self(self.0.clone()) + } + + fn lock(&self) -> LockResult>> { + self.0.lock() + } + + async fn wait_expected_len(&self, expected_len: usize) { + for _ in 0..MAX_ATTEMPTS { + { + let captured_logs = self.0.lock().expect("Failed to lock captured logs"); + if captured_logs.len() == expected_len { + return; + } + } + sleep(Duration::from_millis(TIMEOUT_MS)).await; + } + panic!( + "Expected {} captured logs, but got {}", + expected_len, + self.0.lock().unwrap().len() + ); + } +} + +struct CapturedLog { + pub body: String, + pub level: Level, + pub target: String, +} + +struct TestingLogger { + captured_logs: CapturedLogs, +} + +impl TestingLogger { + pub fn with_capacity(capacity: usize) -> (Self, CapturedLogs) { + let captured_logs = CapturedLogs::with_capacity(capacity); + ( + Self { + captured_logs: captured_logs.clone(), + }, + captured_logs, + ) + } +} + +impl Log for TestingLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + true + } + + fn flush(&self) {} + + fn log(&self, record: &Record) { + let captured_record = CapturedLog { + body: format!("{}", record.args()), + level: record.level(), + target: record.target().to_string(), + }; + self.captured_logs + .lock() + .expect("Failed to acquire a lock for storing a log") + .push(captured_record); + } +} + +#[tokio_integration_test] +async fn log() { + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/log"); + let mut bpf = Bpf::load(bytes).unwrap(); + + let (logger, captured_logs) = TestingLogger::with_capacity(5); + BpfLogger::init_with_logger(&mut bpf, logger).unwrap(); + + let prog: &mut UProbe = bpf.program_mut("test_log").unwrap().try_into().unwrap(); + prog.load().unwrap(); + prog.attach(Some("trigger_ebpf_program"), 0, "/proc/self/exe", None) + .unwrap(); + + // Call the function that the uprobe is attached to, so it starts logging. 
+ trigger_ebpf_program(); + captured_logs.wait_expected_len(6).await; + + let records = captured_logs + .lock() + .expect("Failed to acquire a lock for reading logs"); + assert_eq!(records.len(), 6); + + assert_eq!(records[0].body, "Hello from eBPF!"); + assert_eq!(records[0].level, Level::Debug); + assert_eq!(records[0].target, "log"); + + assert_eq!(records[1].body, "69, 420, wao"); + assert_eq!(records[1].level, Level::Error); + assert_eq!(records[1].target, "log"); + + assert_eq!(records[2].body, "ipv4: 10.0.0.1, ipv6: 2001:db8::1"); + assert_eq!(records[2].level, Level::Info); + assert_eq!(records[2].target, "log"); + + assert_eq!( + records[3].body, + "mac lc: 04:20:06:09:00:40, mac uc: 04:20:06:09:00:40" + ); + assert_eq!(records[3].level, Level::Trace); + assert_eq!(records[3].target, "log"); + + assert_eq!(records[4].body, "hex lc: 2f, hex uc: 2F"); + assert_eq!(records[4].level, Level::Warn); + assert_eq!(records[4].target, "log"); + + assert_eq!(records[5].body, "hex lc: deadbeef, hex uc: DEADBEEF"); + assert_eq!(records[5].level, Level::Debug); + assert_eq!(records[5].target, "log"); +} diff --git a/test/integration-test/src/tests/mod.rs b/test/integration-test/src/tests/mod.rs index 127b037d..c26ca5a4 100644 --- a/test/integration-test/src/tests/mod.rs +++ b/test/integration-test/src/tests/mod.rs @@ -7,11 +7,13 @@ use std::{ffi::CStr, mem}; pub mod btf_relocations; pub mod elf; pub mod load; +pub mod log; pub mod rbpf; pub mod relocations; pub mod smoke; -pub use integration_test_macros::integration_test; +pub use integration_test_macros::{integration_test, tokio_integration_test}; + #[derive(Debug)] pub struct IntegrationTest { pub name: &'static str, diff --git a/test/run.sh b/test/run.sh index a33449eb..46caf224 100755 --- a/test/run.sh +++ b/test/run.sh @@ -197,7 +197,7 @@ EOF exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \ -y --profile minimal --default-toolchain nightly --component rust-src --component clippy' exec_vm 'echo source ~/.cargo/env >> ~/.bashrc' - exec_vm cargo install bpf-linker --no-default-features --features system-llvm + exec_vm cargo install bpf-linker --no-default-features } scp_vm() { diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 38bf920f..671a987c 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" aya-tool = { path = "../aya-tool" } clap = { version = "4", features = ["derive"] } anyhow = "1" -syn = "1" +syn = "2" quote = "1" proc-macro2 = "1" indoc = "2.0" diff --git a/xtask/src/codegen/aya_bpf_bindings.rs b/xtask/src/codegen/aya_bpf_bindings.rs index bcd7d44c..1bfcbc5b 100644 --- a/xtask/src/codegen/aya_bpf_bindings.rs +++ b/xtask/src/codegen/aya_bpf_bindings.rs @@ -46,6 +46,7 @@ pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> { "sk_action", "pt_regs", "user_pt_regs", + "user_regs_struct", "xdp_action", ]; let vars = ["BPF_.*", "bpf_.*", "TC_ACT_.*", "SOL_SOCKET", "SO_.*"];
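A few notes on the patterns introduced above, each with a small illustrative sketch.

In aya-obj the negative `no_std` feature gives way to a positive `std` feature, and `HashSet` joins `HashMap` behind the `crate::util` aliases so `relocation.rs` no longer reaches into `std::collections` directly. A sketch of the pattern follows; the `count_unique` helper is invented purely to show that call sites are oblivious to which implementation they get.

```rust
// Feature-gated aliases in the spirit of aya-obj::util.
#[cfg(feature = "std")]
pub(crate) use std::collections::{HashMap, HashSet};
#[cfg(not(feature = "std"))]
pub(crate) use hashbrown::{HashMap, HashSet};

// Compiles unchanged with or without the `std` feature enabled.
pub(crate) fn count_unique<'a>(names: impl Iterator<Item = &'a str>) -> usize {
    names.collect::<HashSet<_>>().len()
}
```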
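The bump from bitflags 1.x to 2.x in aya's Cargo.toml explains the mechanical edits scattered through the diff: generated flag types no longer implement common traits automatically (hence the added `#[derive(Debug)]` and `#[derive(Clone, Copy, Debug, Default)]`), the raw value moves from a public `.bits` field to a `.bits()` method, and struct-literal construction such as `Self { bits: ... }` is gone, so defaults are built from the flag constants themselves. A compact sketch of the 2.x style, using an invented `DemoFlags` type:

```rust
use bitflags::bitflags;

bitflags! {
    // bitflags 2.x: derives are opt-in and the raw value sits behind `.bits()`.
    #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
    pub struct DemoFlags: u32 {
        const DEBUG = 1;
        const STATS = 4;
        const VERBOSE = 2 | Self::DEBUG.bits();
    }
}

fn main() {
    let level = DemoFlags::DEBUG | DemoFlags::STATS;
    // `.bits` used to be a field; in 2.x it is a method.
    assert_eq!(level.bits(), 5);
    // Deriving Default yields the empty flag set.
    assert_eq!(DemoFlags::default(), DemoFlags::empty());
}
```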
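The `Lsm` macro changes (an optional `name` plus a `sleepable` flag) and the loader change that sets `BPF_F_SLEEPABLE` combine to support sleepable LSM programs, which land in `lsm.s/<hook>` ELF sections instead of `lsm/<hook>`. A sketch of the eBPF side is below; the `file_open` hook is only an example, and the exact attribute spelling (quoted `"true"` versus a bare `sleepable`) depends on the macro's shared argument parser, which is not shown in this diff, so treat that spelling as an assumption.

```rust
#![no_std]
#![no_main]

use aya_bpf::{macros::lsm, programs::LsmContext};

// With `sleepable` set, the macro places the program in an "lsm.s/file_open"
// section; the userspace loader then adds BPF_F_SLEEPABLE when loading it.
#[lsm(name = "file_open", sleepable = "true")]
pub fn file_open(ctx: LsmContext) -> i32 {
    match try_file_open(ctx) {
        Ok(ret) => ret,
        Err(ret) => ret,
    }
}

fn try_file_open(_ctx: LsmContext) -> Result<i32, i32> {
    Ok(0)
}

#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
    unsafe { core::hint::unreachable_unchecked() }
}
```

Userspace loading should be unchanged apart from the flag: `Lsm::load("file_open", &btf)` still names the hook, and the loader derives `BPF_F_SLEEPABLE` from the parsed section.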
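`kernel_version()` now consults Ubuntu's `/proc/version_signature` and a `Debian` marker in `uname().version` before falling back to parsing `uname -r`, because both distributions report release strings that differ from the upstream kernel the feature checks actually care about (the Debian branch also clamps 4.19 patch levels at 255 to match the distro's own workaround). For readers who prefer to avoid `libc::sscanf` (and the need to guarantee the buffer handed to it is NUL-terminated), a pure-Rust parse of the Ubuntu file might look like the sketch below; the assumed format is `Ubuntu <abi-version> <upstream-version>`, matching the `%*s %*s %u.%u.%u` format string in the diff.

```rust
use std::fs;

// e.g. "Ubuntu 5.15.0-52.58-generic 5.15.60" -> Some((5, 15, 60))
fn ubuntu_kernel_version() -> Option<(u32, u32, u32)> {
    let sig = fs::read_to_string("/proc/version_signature").ok()?;
    // Third whitespace-separated field holds the upstream version.
    let upstream = sig.split_whitespace().nth(2)?;
    let mut parts = upstream.split('.').map(str::parse::<u32>);
    match (parts.next(), parts.next(), parts.next()) {
        (Some(Ok(major)), Some(Ok(minor)), Some(Ok(patch))) => Some((major, minor, patch)),
        _ => None,
    }
}
```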
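Finally, the new integration test drives aya-log through a custom `Log` implementation so the captured records can be asserted on. Outside of a test harness, the more common setup is to install an ordinary logger and let `BpfLogger::init` forward eBPF records to it. The sketch below is an assumption-laden illustration, not part of the diff: the object path and program name are copied from the test and will differ per project layout, and it assumes `env_logger`, `anyhow`, and a tokio runtime (with the `macros`, `rt-multi-thread` and `time` features), since aya-log's readers run as async tasks, just as the test runs inside a tokio runtime.

```rust
use aya::{include_bytes_aligned, programs::UProbe, Bpf};
use aya_log::BpfLogger;

#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_ebpf_program() {}

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    env_logger::init(); // any `log`-compatible logger works here

    let mut bpf = Bpf::load(include_bytes_aligned!(
        "../../../../target/bpfel-unknown-none/release/log"
    ))?;
    // Forward records emitted via aya-log-ebpf's debug!/info!/... macros to
    // the logger installed above.
    BpfLogger::init(&mut bpf)?;

    let prog: &mut UProbe = bpf.program_mut("test_log").unwrap().try_into()?;
    prog.load()?;
    prog.attach(Some("trigger_ebpf_program"), 0, "/proc/self/exe", None)?;

    trigger_ebpf_program();
    // Give the perf buffer readers a moment to drain before exiting.
    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
    Ok(())
}
```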