Merge branch 'aya-rs:main' into main

pull/530/head
qjerome 2 years ago committed by GitHub
commit 0811203286

@ -7,3 +7,7 @@ updates:
directory: "/" directory: "/"
schedule: schedule:
interval: "weekly" interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

@ -26,13 +26,12 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: nightly toolchain: nightly
components: rust-src components: rust-src
override: true
- uses: Swatinem/rust-cache@v1 - uses: Swatinem/rust-cache@v1

@ -24,12 +24,15 @@ jobs:
- riscv64gc-unknown-linux-gnu - riscv64gc-unknown-linux-gnu
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
toolchain: stable toolchain: stable
override: true
- uses: taiki-e/install-action@cargo-hack
- name: Check
run: cargo hack check --all-targets --feature-powerset --ignore-private
- uses: Swatinem/rust-cache@v1 - uses: Swatinem/rust-cache@v1
- name: Prereqs - name: Prereqs

@ -7,9 +7,9 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/checkout@v2 - uses: actions/checkout@v3
with: with:
repository: libbpf/libbpf repository: libbpf/libbpf
path: libbpf path: libbpf
@ -18,12 +18,10 @@ jobs:
working-directory: libbpf working-directory: libbpf
run: echo "LIBBPF_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV run: echo "LIBBPF_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
profile: minimal
toolchain: nightly toolchain: nightly
components: rustfmt, clippy components: rustfmt, clippy
override: true
- uses: Swatinem/rust-cache@v1 - uses: Swatinem/rust-cache@v1

@ -17,8 +17,8 @@ jobs:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions/checkout@v2 - uses: actions/checkout@v3
with: with:
repository: libbpf/libbpf repository: libbpf/libbpf
path: libbpf path: libbpf

@ -18,14 +18,12 @@ jobs:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1 - uses: dtolnay/rust-toolchain@master
with: with:
profile: minimal
toolchain: nightly toolchain: nightly
components: rustfmt, clippy, miri, rust-src components: rustfmt, clippy, miri, rust-src
override: true
- name: Check formatting - name: Check formatting
run: | run: |

@ -9,7 +9,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/') if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
@ -18,7 +18,7 @@ jobs:
- name: Build Changelog - name: Build Changelog
id: github_release id: github_release
uses: mikepenz/release-changelog-builder-action@v2 uses: mikepenz/release-changelog-builder-action@v3
with: with:
configuration: ".github/changelog-config.json" configuration: ".github/changelog-config.json"
env: env:

@ -10,7 +10,7 @@ proc-macro = true
[dependencies] [dependencies]
proc-macro2 = "1.0" proc-macro2 = "1.0"
quote = "1.0" quote = "1.0"
syn = {version = "1.0", features = ["full"]} syn = {version = "2.0", features = ["full"]}
[dev-dependencies] [dev-dependencies]
aya-bpf = { path = "../bpf/aya-bpf" } aya-bpf = { path = "../bpf/aya-bpf" }

@ -212,7 +212,7 @@ impl Xdp {
frags = m frags = m
} else { } else {
return Err(Error::new_spanned( return Err(Error::new_spanned(
"mutlibuffer", s,
"invalid value. should be 'true' or 'false'", "invalid value. should be 'true' or 'false'",
)); ));
} }
@ -613,18 +613,39 @@ impl RawTracePoint {
pub struct Lsm { pub struct Lsm {
item: ItemFn, item: ItemFn,
name: String, name: Option<String>,
sleepable: bool,
} }
impl Lsm { impl Lsm {
pub fn from_syn(mut args: Args, item: ItemFn) -> Result<Lsm> { pub fn from_syn(mut args: Args, item: ItemFn) -> Result<Lsm> {
let name = name_arg(&mut args)?.unwrap_or_else(|| item.sig.ident.to_string()); let name = pop_arg(&mut args, "name");
let mut sleepable = false;
Ok(Lsm { item, name }) if let Some(s) = pop_arg(&mut args, "sleepable") {
if let Ok(m) = s.parse() {
sleepable = m
} else {
return Err(Error::new_spanned(
s,
"invalid value. should be 'true' or 'false'",
));
}
}
err_on_unknown_args(&args)?;
Ok(Lsm {
item,
name,
sleepable,
})
} }
pub fn expand(&self) -> Result<TokenStream> { pub fn expand(&self) -> Result<TokenStream> {
let section_name = format!("lsm/{}", self.name); let section_prefix = if self.sleepable { "lsm.s" } else { "lsm" };
let section_name = if let Some(name) = &self.name {
format!("{section_prefix}/{name}")
} else {
section_prefix.to_string()
};
let fn_vis = &self.item.vis; let fn_vis = &self.item.vis;
let fn_name = &self.item.sig.ident; let fn_name = &self.item.sig.ident;
let item = &self.item; let item = &self.item;

@ -118,6 +118,40 @@ pub fn cgroup_skb(attrs: TokenStream, item: TokenStream) -> TokenStream {
.into() .into()
} }
/// Marks a function as a [`CgroupSockAddr`] eBPF program.
///
/// [`CgroupSockAddr`] programs can be used to inspect or modify socket addresses passed to
/// various syscalls within a [cgroup]. The `attach_type` argument specifies a place to attach
/// the eBPF program to. See [`CgroupSockAddrAttachType`] for more details.
///
/// [cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
/// [`CgroupSockAddrAttachType`]: ../aya/programs/cgroup_sock_addr/enum.CgroupSockAddrAttachType.html
/// [`CgroupSockAddr`]: ../aya/programs/cgroup_sock_addr/struct.CgroupSockAddr.html
///
/// # Minimum kernel version
///
/// The minimum kernel version required to use this feature is 4.17.
///
/// # Examples
///
/// ```no_run
/// use aya_bpf::{macros::cgroup_sock_addr, programs::SockAddrContext};
///
/// #[cgroup_sock_addr(connect4)]
/// pub fn connect4(ctx: SockAddrContext) -> i32 {
/// match try_connect4(ctx) {
/// Ok(ret) => ret,
/// Err(ret) => match ret.try_into() {
/// Ok(rt) => rt,
/// Err(_) => 1,
/// },
/// }
/// }
///
/// fn try_connect4(ctx: SockAddrContext) -> Result<i32, i64> {
/// Ok(0)
/// }
/// ```
#[proc_macro_attribute] #[proc_macro_attribute]
pub fn cgroup_sock_addr(attrs: TokenStream, item: TokenStream) -> TokenStream { pub fn cgroup_sock_addr(attrs: TokenStream, item: TokenStream) -> TokenStream {
let args = parse_macro_input!(attrs as SockAddrArgs); let args = parse_macro_input!(attrs as SockAddrArgs);

@ -9,12 +9,7 @@ repository = "https://github.com/aya-rs/aya-log"
documentation = "https://docs.rs/aya-log" documentation = "https://docs.rs/aya-log"
edition = "2021" edition = "2021"
[features]
default = []
userspace = [ "aya" ]
[dependencies] [dependencies]
aya = { path = "../aya", version = "0.11.0", optional=true }
num_enum = { version = "0.6", default-features = false } num_enum = { version = "0.6", default-features = false }
[lib] [lib]

@ -1,6 +1,6 @@
#![no_std] #![no_std]
use core::{cmp, mem, ptr, slice}; use core::{mem, num, ptr};
use num_enum::IntoPrimitive; use num_enum::IntoPrimitive;
@ -8,8 +8,10 @@ pub const LOG_BUF_CAPACITY: usize = 8192;
pub const LOG_FIELDS: usize = 6; pub const LOG_FIELDS: usize = 6;
#[repr(usize)] pub type LogValueLength = u16;
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)]
#[repr(u8)]
#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash, IntoPrimitive)]
pub enum Level { pub enum Level {
/// The "error" level. /// The "error" level.
/// ///
@ -33,7 +35,7 @@ pub enum Level {
Trace, Trace,
} }
#[repr(usize)] #[repr(u8)]
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub enum RecordField { pub enum RecordField {
Target = 1, Target = 1,
@ -46,7 +48,7 @@ pub enum RecordField {
/// Types which are supported by aya-log and can be safely sent from eBPF /// Types which are supported by aya-log and can be safely sent from eBPF
/// programs to userspace. /// programs to userspace.
#[repr(usize)] #[repr(u8)]
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub enum Argument { pub enum Argument {
DisplayHint, DisplayHint,
@ -73,6 +75,7 @@ pub enum Argument {
/// `[u16; 8]` array which represents an IPv6 address. /// `[u16; 8]` array which represents an IPv6 address.
ArrU16Len8, ArrU16Len8,
Bytes,
Str, Str,
} }
@ -96,64 +99,65 @@ pub enum DisplayHint {
UpperMac, UpperMac,
} }
#[cfg(feature = "userspace")] struct TagLenValue<T, V> {
mod userspace { pub tag: T,
use super::*; pub value: V,
unsafe impl aya::Pod for RecordField {}
unsafe impl aya::Pod for Argument {}
unsafe impl aya::Pod for DisplayHint {}
}
struct TagLenValue<'a, T> {
tag: T,
value: &'a [u8],
} }
impl<'a, T> TagLenValue<'a, T> impl<T, V> TagLenValue<T, V>
where where
T: Copy, V: IntoIterator<Item = u8>,
<V as IntoIterator>::IntoIter: ExactSizeIterator,
{ {
#[inline(always)] pub(crate) fn write(self, mut buf: &mut [u8]) -> Result<usize, ()> {
pub(crate) fn new(tag: T, value: &'a [u8]) -> TagLenValue<'a, T> { // Break the abstraction to please the verifier.
TagLenValue { tag, value } if buf.len() > LOG_BUF_CAPACITY {
buf = &mut buf[..LOG_BUF_CAPACITY];
} }
let Self { tag, value } = self;
pub(crate) fn write(&self, mut buf: &mut [u8]) -> Result<usize, ()> { let value = value.into_iter();
let size = mem::size_of::<T>() + mem::size_of::<usize>() + self.value.len(); let len = value.len();
let remaining = cmp::min(buf.len(), LOG_BUF_CAPACITY); let wire_len: LogValueLength = value
// Check if the size doesn't exceed the buffer bounds. .len()
if size > remaining { .try_into()
.map_err(|num::TryFromIntError { .. }| ())?;
let size = mem::size_of_val(&tag) + mem::size_of_val(&wire_len) + len;
if size > buf.len() {
return Err(()); return Err(());
} }
unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, self.tag) }; let tag_size = mem::size_of_val(&tag);
buf = &mut buf[mem::size_of::<T>()..]; unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, tag) };
buf = &mut buf[tag_size..];
unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, self.value.len()) }; unsafe { ptr::write_unaligned(buf.as_mut_ptr() as *mut _, wire_len) };
buf = &mut buf[mem::size_of::<usize>()..]; buf = &mut buf[mem::size_of_val(&wire_len)..];
buf.iter_mut().zip(value).for_each(|(dst, src)| {
*dst = src;
});
let len = cmp::min(buf.len(), self.value.len());
// The verifier isn't happy with `len` being unbounded, so compare it
// with `LOG_BUF_CAPACITY`.
if len > LOG_BUF_CAPACITY {
return Err(());
}
buf[..len].copy_from_slice(&self.value[..len]);
Ok(size) Ok(size)
} }
} }
impl<T, V> TagLenValue<T, V> {
#[inline(always)]
pub(crate) fn new(tag: T, value: V) -> TagLenValue<T, V> {
TagLenValue { tag, value }
}
}
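For orientation: each TagLenValue now lands in the buffer as a one-byte tag (the enums above are #[repr(u8)]), a native-endian u16 length (LogValueLength), and the raw value bytes. A hypothetical decoder, not part of the crate, could read an entry back like this:

// Illustration only: decode one tag-length-value entry produced by write().
fn read_entry(buf: &[u8]) -> Option<(u8, &[u8], &[u8])> {
    let (&tag, rest) = buf.split_first()?;
    let len_size = mem::size_of::<LogValueLength>();
    if rest.len() < len_size {
        return None;
    }
    let len = LogValueLength::from_ne_bytes(rest[..len_size].try_into().ok()?) as usize;
    let rest = &rest[len_size..];
    if rest.len() < len {
        return None;
    }
    let (value, rest) = rest.split_at(len);
    Some((tag, value, rest)) // tag, value bytes, remaining buffer
}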
pub trait WriteToBuf { pub trait WriteToBuf {
#[allow(clippy::result_unit_err)] #[allow(clippy::result_unit_err)]
fn write(&self, buf: &mut [u8]) -> Result<usize, ()>; fn write(self, buf: &mut [u8]) -> Result<usize, ()>;
} }
macro_rules! impl_write_to_buf { macro_rules! impl_write_to_buf {
($type:ident, $arg_type:expr) => { ($type:ident, $arg_type:expr) => {
impl WriteToBuf for $type { impl WriteToBuf for $type {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
TagLenValue::<Argument>::new($arg_type, &self.to_ne_bytes()).write(buf) TagLenValue::new($arg_type, self.to_ne_bytes()).write(buf)
} }
} }
}; };
@ -175,35 +179,40 @@ impl_write_to_buf!(f32, Argument::F32);
impl_write_to_buf!(f64, Argument::F64); impl_write_to_buf!(f64, Argument::F64);
impl WriteToBuf for [u8; 16] { impl WriteToBuf for [u8; 16] {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
TagLenValue::<Argument>::new(Argument::ArrU8Len16, self).write(buf) TagLenValue::new(Argument::ArrU8Len16, self).write(buf)
} }
} }
impl WriteToBuf for [u16; 8] { impl WriteToBuf for [u16; 8] {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
let ptr = self.as_ptr().cast::<u8>(); let bytes = unsafe { core::mem::transmute::<_, [u8; 16]>(self) };
let bytes = unsafe { slice::from_raw_parts(ptr, 16) }; TagLenValue::new(Argument::ArrU16Len8, bytes).write(buf)
TagLenValue::<Argument>::new(Argument::ArrU16Len8, bytes).write(buf)
} }
} }
impl WriteToBuf for [u8; 6] { impl WriteToBuf for [u8; 6] {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
TagLenValue::<Argument>::new(Argument::ArrU8Len6, self).write(buf) TagLenValue::new(Argument::ArrU8Len6, self).write(buf)
}
}
impl WriteToBuf for &[u8] {
fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
TagLenValue::new(Argument::Bytes, self.iter().copied()).write(buf)
} }
} }
impl WriteToBuf for str { impl WriteToBuf for &str {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
TagLenValue::<Argument>::new(Argument::Str, self.as_bytes()).write(buf) TagLenValue::new(Argument::Str, self.as_bytes().iter().copied()).write(buf)
} }
} }
impl WriteToBuf for DisplayHint { impl WriteToBuf for DisplayHint {
fn write(&self, buf: &mut [u8]) -> Result<usize, ()> { fn write(self, buf: &mut [u8]) -> Result<usize, ()> {
let v: u8 = (*self).into(); let v: u8 = self.into();
TagLenValue::<Argument>::new(Argument::DisplayHint, &v.to_ne_bytes()).write(buf) TagLenValue::new(Argument::DisplayHint, v.to_ne_bytes()).write(buf)
} }
} }
@ -219,17 +228,31 @@ pub fn write_record_header(
line: u32, line: u32,
num_args: usize, num_args: usize,
) -> Result<usize, ()> { ) -> Result<usize, ()> {
let level: u8 = level.into();
let mut size = 0; let mut size = 0;
for attr in [ size += TagLenValue::new(RecordField::Target, target.as_bytes().iter().copied())
TagLenValue::<RecordField>::new(RecordField::Target, target.as_bytes()), .write(&mut buf[size..])?;
TagLenValue::<RecordField>::new(RecordField::Level, &(level as usize).to_ne_bytes()), size += TagLenValue::new(RecordField::Level, level.to_ne_bytes()).write(&mut buf[size..])?;
TagLenValue::<RecordField>::new(RecordField::Module, module.as_bytes()), size += TagLenValue::new(RecordField::Module, module.as_bytes().iter().copied())
TagLenValue::<RecordField>::new(RecordField::File, file.as_bytes()), .write(&mut buf[size..])?;
TagLenValue::<RecordField>::new(RecordField::Line, &line.to_ne_bytes()), size += TagLenValue::new(RecordField::File, file.as_bytes().iter().copied())
TagLenValue::<RecordField>::new(RecordField::NumArgs, &num_args.to_ne_bytes()), .write(&mut buf[size..])?;
] { size += TagLenValue::new(RecordField::Line, line.to_ne_bytes()).write(&mut buf[size..])?;
size += attr.write(&mut buf[size..])?; size +=
} TagLenValue::new(RecordField::NumArgs, num_args.to_ne_bytes()).write(&mut buf[size..])?;
Ok(size) Ok(size)
} }
#[cfg(test)]
mod test {
use super::*;
#[test]
fn log_value_length_sufficient() {
assert!(
LOG_BUF_CAPACITY >= LogValueLength::MAX.into(),
"{} < {}",
LOG_BUF_CAPACITY,
LogValueLength::MAX
);
}
}

@ -8,7 +8,7 @@ aya-log-common = { path = "../aya-log-common" }
aya-log-parser = { path = "../aya-log-parser" } aya-log-parser = { path = "../aya-log-parser" }
proc-macro2 = "1.0" proc-macro2 = "1.0"
quote = "1.0" quote = "1.0"
syn = "1.0" syn = "2.0"
[lib] [lib]
proc-macro = true proc-macro = true

@ -151,12 +151,11 @@ pub(crate) fn log(args: LogArgs, level: Option<TokenStream>) -> Result<TokenStre
let record_len = header_len; let record_len = header_len;
if let Ok(record_len) = { if let Ok(record_len) = {
use ::aya_log_ebpf::WriteToBuf;
Ok::<_, ()>(record_len) #( .and_then(|record_len| { Ok::<_, ()>(record_len) #( .and_then(|record_len| {
if record_len >= buf.buf.len() { if record_len >= buf.buf.len() {
return Err(()); return Err(());
} }
{ #values_iter }.write(&mut buf.buf[record_len..]).map(|len| record_len + len) aya_log_ebpf::WriteToBuf::write({ #values_iter }, &mut buf.buf[record_len..]).map(|len| record_len + len)
}) )* }) )*
} { } {
unsafe { ::aya_log_ebpf::AYA_LOGS.output( unsafe { ::aya_log_ebpf::AYA_LOGS.output(

@ -12,7 +12,7 @@ edition = "2021"
[dependencies] [dependencies]
aya = { path = "../aya", version = "0.11.0", features=["async_tokio"] } aya = { path = "../aya", version = "0.11.0", features=["async_tokio"] }
aya-log-common = { path = "../aya-log-common", version = "0.1.13", features=["userspace"] } aya-log-common = { path = "../aya-log-common", version = "0.1.13" }
thiserror = "1" thiserror = "1"
log = "0.4" log = "0.4"
bytes = "1.1" bytes = "1.1"

@ -59,9 +59,11 @@ use std::{
const MAP_NAME: &str = "AYA_LOGS"; const MAP_NAME: &str = "AYA_LOGS";
use aya_log_common::{Argument, DisplayHint, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS}; use aya_log_common::{
Argument, DisplayHint, Level, LogValueLength, RecordField, LOG_BUF_CAPACITY, LOG_FIELDS,
};
use bytes::BytesMut; use bytes::BytesMut;
use log::{error, Level, Log, Record}; use log::{error, Log, Record};
use thiserror::Error; use thiserror::Error;
use aya::{ use aya::{
@ -73,6 +75,20 @@ use aya::{
Bpf, Pod, Bpf, Pod,
}; };
#[derive(Copy, Clone)]
#[repr(transparent)]
struct RecordFieldWrapper(RecordField);
#[derive(Copy, Clone)]
#[repr(transparent)]
struct ArgumentWrapper(Argument);
#[derive(Copy, Clone)]
#[repr(transparent)]
struct DisplayHintWrapper(DisplayHint);
unsafe impl aya::Pod for RecordFieldWrapper {}
unsafe impl aya::Pod for ArgumentWrapper {}
unsafe impl aya::Pod for DisplayHintWrapper {}
/// Log messages generated by `aya_log_ebpf` using the [log] crate. /// Log messages generated by `aya_log_ebpf` using the [log] crate.
/// ///
/// For more details see the [module level documentation](crate). /// For more details see the [module level documentation](crate).
@ -102,9 +118,7 @@ impl BpfLogger {
let log = logger.clone(); let log = logger.clone();
tokio::spawn(async move { tokio::spawn(async move {
let mut buffers = (0..10) let mut buffers = vec![BytesMut::with_capacity(LOG_BUF_CAPACITY); 10];
.map(|_| BytesMut::with_capacity(LOG_BUF_CAPACITY))
.collect::<Vec<_>>();
loop { loop {
let events = buf.read_events(&mut buffers).await.unwrap(); let events = buf.read_events(&mut buffers).await.unwrap();
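For context (not part of this diff), the userspace side is typically wired up roughly as follows; Bpf::load_file, env_logger and the error type are assumptions standing in for whatever loader and log backend the application actually uses:

// Hypothetical setup: install a `log` backend, then let BpfLogger spawn the
// async reader loop shown above to forward eBPF-side records to it.
fn init_logging() -> Result<(), Box<dyn std::error::Error>> {
    env_logger::init();
    let mut bpf = aya::Bpf::load_file("prog.o")?;
    aya_log::BpfLogger::init(&mut bpf)?;
    Ok(())
}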
@ -146,6 +160,20 @@ where
} }
} }
pub struct LowerHexDebugFormatter;
impl<T> Formatter<&[T]> for LowerHexDebugFormatter
where
T: LowerHex,
{
fn format(v: &[T]) -> String {
let mut s = String::new();
for v in v {
let () = core::fmt::write(&mut s, format_args!("{v:x}")).unwrap();
}
s
}
}
pub struct UpperHexFormatter; pub struct UpperHexFormatter;
impl<T> Formatter<T> for UpperHexFormatter impl<T> Formatter<T> for UpperHexFormatter
where where
@ -156,6 +184,20 @@ where
} }
} }
pub struct UpperHexDebugFormatter;
impl<T> Formatter<&[T]> for UpperHexDebugFormatter
where
T: UpperHex,
{
fn format(v: &[T]) -> String {
let mut s = String::new();
for v in v {
let () = core::fmt::write(&mut s, format_args!("{v:X}")).unwrap();
}
s
}
}
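Both debug formatters render a slice as a run of hex digits with no separators; a small sketch (illustration only, matching the test_bytes cases added further down):

#[test]
fn hex_debug_formatters_sketch() {
    // Assumes the crate-internal Formatter trait is in scope.
    let bytes: &[u8] = &[0xde, 0xad];
    assert_eq!(LowerHexDebugFormatter::format(bytes), "dead");
    assert_eq!(UpperHexDebugFormatter::format(bytes), "DEAD");
}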
pub struct Ipv4Formatter; pub struct Ipv4Formatter;
impl<T> Formatter<T> for Ipv4Formatter impl<T> Formatter<T> for Ipv4Formatter
where where
@ -197,12 +239,22 @@ impl Formatter<[u8; 6]> for UpperMacFormatter {
} }
trait Format { trait Format {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()>; fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()>;
}
impl Format for &[u8] {
fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::LowerHex) => Ok(LowerHexDebugFormatter::format(self)),
Some(DisplayHint::UpperHex) => Ok(UpperHexDebugFormatter::format(self)),
_ => Err(()),
}
}
} }
impl Format for u32 { impl Format for u32 {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)), Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)),
Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)), Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)),
@ -216,8 +268,8 @@ impl Format for u32 {
} }
impl Format for [u8; 6] { impl Format for [u8; 6] {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Err(()), Some(DisplayHint::Default) => Err(()),
Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::LowerHex) => Err(()),
Some(DisplayHint::UpperHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()),
@ -231,8 +283,8 @@ impl Format for [u8; 6] {
} }
impl Format for [u8; 16] { impl Format for [u8; 16] {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Err(()), Some(DisplayHint::Default) => Err(()),
Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::LowerHex) => Err(()),
Some(DisplayHint::UpperHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()),
@ -246,8 +298,8 @@ impl Format for [u8; 16] {
} }
impl Format for [u16; 8] { impl Format for [u16; 8] {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Err(()), Some(DisplayHint::Default) => Err(()),
Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::LowerHex) => Err(()),
Some(DisplayHint::UpperHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()),
@ -263,8 +315,8 @@ impl Format for [u16; 8] {
macro_rules! impl_format { macro_rules! impl_format {
($type:ident) => { ($type:ident) => {
impl Format for $type { impl Format for $type {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)), Some(DisplayHint::LowerHex) => Ok(LowerHexFormatter::format(self)),
Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)), Some(DisplayHint::UpperHex) => Ok(UpperHexFormatter::format(self)),
@ -293,8 +345,8 @@ impl_format!(usize);
macro_rules! impl_format_float { macro_rules! impl_format_float {
($type:ident) => { ($type:ident) => {
impl Format for $type { impl Format for $type {
fn format(&self, last_hint: Option<DisplayHint>) -> Result<String, ()> { fn format(&self, last_hint: Option<DisplayHintWrapper>) -> Result<String, ()> {
match last_hint { match last_hint.map(|DisplayHintWrapper(dh)| dh) {
Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)), Some(DisplayHint::Default) => Ok(DefaultFormatter::format(self)),
Some(DisplayHint::LowerHex) => Err(()), Some(DisplayHint::LowerHex) => Err(()),
Some(DisplayHint::UpperHex) => Err(()), Some(DisplayHint::UpperHex) => Err(()),
@ -346,33 +398,42 @@ pub enum Error {
fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> { fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> {
let mut target = None; let mut target = None;
let mut level = Level::Trace; let mut level = None;
let mut module = None; let mut module = None;
let mut file = None; let mut file = None;
let mut line = None; let mut line = None;
let mut num_args = None; let mut num_args = None;
for _ in 0..LOG_FIELDS { for _ in 0..LOG_FIELDS {
let (attr, rest) = unsafe { TagLenValue::<'_, RecordField>::try_read(buf)? }; let (RecordFieldWrapper(tag), value, rest) = try_read(buf)?;
match attr.tag { match tag {
RecordField::Target => { RecordField::Target => {
target = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); target = Some(str::from_utf8(value).map_err(|_| ())?);
} }
RecordField::Level => { RecordField::Level => {
level = unsafe { ptr::read_unaligned(attr.value.as_ptr() as *const _) } level = Some({
let level = unsafe { ptr::read_unaligned(value.as_ptr() as *const _) };
match level {
Level::Error => log::Level::Error,
Level::Warn => log::Level::Warn,
Level::Info => log::Level::Info,
Level::Debug => log::Level::Debug,
Level::Trace => log::Level::Trace,
}
})
} }
RecordField::Module => { RecordField::Module => {
module = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); module = Some(str::from_utf8(value).map_err(|_| ())?);
} }
RecordField::File => { RecordField::File => {
file = Some(std::str::from_utf8(attr.value).map_err(|_| ())?); file = Some(str::from_utf8(value).map_err(|_| ())?);
} }
RecordField::Line => { RecordField::Line => {
line = Some(u32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?)); line = Some(u32::from_ne_bytes(value.try_into().map_err(|_| ())?));
} }
RecordField::NumArgs => { RecordField::NumArgs => {
num_args = Some(usize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?)); num_args = Some(usize::from_ne_bytes(value.try_into().map_err(|_| ())?));
} }
} }
@ -380,103 +441,106 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> {
} }
let mut full_log_msg = String::new(); let mut full_log_msg = String::new();
let mut last_hint: Option<DisplayHint> = None; let mut last_hint: Option<DisplayHintWrapper> = None;
for _ in 0..num_args.ok_or(())? { for _ in 0..num_args.ok_or(())? {
let (attr, rest) = unsafe { TagLenValue::<'_, Argument>::try_read(buf)? }; let (ArgumentWrapper(tag), value, rest) = try_read(buf)?;
match attr.tag { match tag {
Argument::DisplayHint => { Argument::DisplayHint => {
last_hint = Some(unsafe { ptr::read_unaligned(attr.value.as_ptr() as *const _) }); last_hint = Some(unsafe { ptr::read_unaligned(value.as_ptr() as *const _) });
} }
Argument::I8 => { Argument::I8 => {
full_log_msg.push_str( full_log_msg.push_str(
&i8::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &i8::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::I16 => { Argument::I16 => {
full_log_msg.push_str( full_log_msg.push_str(
&i16::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &i16::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::I32 => { Argument::I32 => {
full_log_msg.push_str( full_log_msg.push_str(
&i32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &i32::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::I64 => { Argument::I64 => {
full_log_msg.push_str( full_log_msg.push_str(
&i64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &i64::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::Isize => { Argument::Isize => {
full_log_msg.push_str( full_log_msg.push_str(
&isize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &isize::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::U8 => { Argument::U8 => {
full_log_msg.push_str( full_log_msg.push_str(
&u8::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &u8::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::U16 => { Argument::U16 => {
full_log_msg.push_str( full_log_msg.push_str(
&u16::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &u16::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::U32 => { Argument::U32 => {
full_log_msg.push_str( full_log_msg.push_str(
&u32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &u32::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::U64 => { Argument::U64 => {
full_log_msg.push_str( full_log_msg.push_str(
&u64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &u64::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::Usize => { Argument::Usize => {
full_log_msg.push_str( full_log_msg.push_str(
&usize::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &usize::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::F32 => { Argument::F32 => {
full_log_msg.push_str( full_log_msg.push_str(
&f32::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &f32::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::F64 => { Argument::F64 => {
full_log_msg.push_str( full_log_msg.push_str(
&f64::from_ne_bytes(attr.value.try_into().map_err(|_| ())?) &f64::from_ne_bytes(value.try_into().map_err(|_| ())?)
.format(last_hint.take())?, .format(last_hint.take())?,
); );
} }
Argument::ArrU8Len6 => { Argument::ArrU8Len6 => {
let value: [u8; 6] = attr.value.try_into().map_err(|_| ())?; let value: [u8; 6] = value.try_into().map_err(|_| ())?;
full_log_msg.push_str(&value.format(last_hint.take())?); full_log_msg.push_str(&value.format(last_hint.take())?);
} }
Argument::ArrU8Len16 => { Argument::ArrU8Len16 => {
let value: [u8; 16] = attr.value.try_into().map_err(|_| ())?; let value: [u8; 16] = value.try_into().map_err(|_| ())?;
full_log_msg.push_str(&value.format(last_hint.take())?); full_log_msg.push_str(&value.format(last_hint.take())?);
} }
Argument::ArrU16Len8 => { Argument::ArrU16Len8 => {
let data: [u8; 16] = attr.value.try_into().map_err(|_| ())?; let data: [u8; 16] = value.try_into().map_err(|_| ())?;
let mut value: [u16; 8] = Default::default(); let mut value: [u16; 8] = Default::default();
for (i, s) in data.chunks_exact(2).enumerate() { for (i, s) in data.chunks_exact(2).enumerate() {
value[i] = ((s[1] as u16) << 8) | s[0] as u16; value[i] = ((s[1] as u16) << 8) | s[0] as u16;
} }
full_log_msg.push_str(&value.format(last_hint.take())?); full_log_msg.push_str(&value.format(last_hint.take())?);
} }
Argument::Str => match str::from_utf8(attr.value) { Argument::Bytes => {
full_log_msg.push_str(&value.format(last_hint.take())?);
}
Argument::Str => match str::from_utf8(value) {
Ok(v) => { Ok(v) => {
full_log_msg.push_str(v); full_log_msg.push_str(v);
} }
@ -491,7 +555,7 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> {
&Record::builder() &Record::builder()
.args(format_args!("{full_log_msg}")) .args(format_args!("{full_log_msg}"))
.target(target.ok_or(())?) .target(target.ok_or(())?)
.level(level) .level(level.ok_or(())?)
.module_path(module) .module_path(module)
.file(file) .file(file)
.line(line) .line(line)
@ -501,42 +565,32 @@ fn log_buf(mut buf: &[u8], logger: &dyn Log) -> Result<(), ()> {
Ok(()) Ok(())
} }
struct TagLenValue<'a, T: Pod> { fn try_read<T: Pod>(mut buf: &[u8]) -> Result<(T, &[u8], &[u8]), ()> {
tag: T, if buf.len() < mem::size_of::<T>() + mem::size_of::<LogValueLength>() {
value: &'a [u8],
}
impl<'a, T: Pod> TagLenValue<'a, T> {
unsafe fn try_read(mut buf: &'a [u8]) -> Result<(TagLenValue<'a, T>, &'a [u8]), ()> {
if buf.len() < mem::size_of::<T>() + mem::size_of::<usize>() {
return Err(()); return Err(());
} }
let tag = ptr::read_unaligned(buf.as_ptr() as *const T); let tag = unsafe { ptr::read_unaligned(buf.as_ptr() as *const T) };
buf = &buf[mem::size_of::<T>()..]; buf = &buf[mem::size_of::<T>()..];
let len = usize::from_ne_bytes(buf[..mem::size_of::<usize>()].try_into().unwrap()); let len =
buf = &buf[mem::size_of::<usize>()..]; LogValueLength::from_ne_bytes(buf[..mem::size_of::<LogValueLength>()].try_into().unwrap());
buf = &buf[mem::size_of::<LogValueLength>()..];
let len: usize = len.into();
if buf.len() < len { if buf.len() < len {
return Err(()); return Err(());
} }
Ok(( let (value, rest) = buf.split_at(len);
TagLenValue { Ok((tag, value, rest))
tag,
value: &buf[..len],
},
&buf[len..],
))
}
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use aya_log_common::{write_record_header, WriteToBuf}; use aya_log_common::{write_record_header, WriteToBuf};
use log::logger; use log::{logger, Level};
fn new_log(args: usize) -> Result<(usize, Vec<u8>), ()> { fn new_log(args: usize) -> Result<(usize, Vec<u8>), ()> {
let mut buf = vec![0; 8192]; let mut buf = vec![0; 8192];
@ -555,14 +609,14 @@ mod test {
#[test] #[test]
fn test_str() { fn test_str() {
testing_logger::setup(); testing_logger::setup();
let (len, mut input) = new_log(1).unwrap(); let (mut len, mut input) = new_log(1).unwrap();
"test" len += "test".write(&mut input[len..]).unwrap();
.write(&mut input[len..])
.expect("could not write to the buffer"); _ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "test"); assert_eq!(captured_logs[0].body, "test");
@ -575,13 +629,13 @@ mod test {
testing_logger::setup(); testing_logger::setup();
let (mut len, mut input) = new_log(2).unwrap(); let (mut len, mut input) = new_log(2).unwrap();
len += "hello " len += "hello ".write(&mut input[len..]).unwrap();
.write(&mut input[len..]) len += "test".write(&mut input[len..]).unwrap();
.expect("could not write to the buffer");
"test".write(&mut input[len..]).unwrap(); _ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "hello test"); assert_eq!(captured_logs[0].body, "hello test");
@ -589,6 +643,49 @@ mod test {
}); });
} }
#[test]
fn test_bytes() {
testing_logger::setup();
let (mut len, mut input) = new_log(2).unwrap();
len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap();
len += [0xde, 0xad].write(&mut input[len..]).unwrap();
_ = len;
let logger = logger();
let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "dead");
assert_eq!(captured_logs[0].level, Level::Info);
});
}
#[test]
fn test_bytes_with_args() {
testing_logger::setup();
let (mut len, mut input) = new_log(5).unwrap();
len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap();
len += [0xde, 0xad].write(&mut input[len..]).unwrap();
len += " ".write(&mut input[len..]).unwrap();
len += DisplayHint::UpperHex.write(&mut input[len..]).unwrap();
len += [0xbe, 0xef].write(&mut input[len..]).unwrap();
_ = len;
let logger = logger();
let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "dead BEEF");
assert_eq!(captured_logs[0].level, Level::Info);
});
}
#[test] #[test]
fn test_display_hint_default() { fn test_display_hint_default() {
testing_logger::setup(); testing_logger::setup();
@ -596,10 +693,12 @@ mod test {
len += "default hint: ".write(&mut input[len..]).unwrap(); len += "default hint: ".write(&mut input[len..]).unwrap();
len += DisplayHint::Default.write(&mut input[len..]).unwrap(); len += DisplayHint::Default.write(&mut input[len..]).unwrap();
14.write(&mut input[len..]).unwrap(); len += 14.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "default hint: 14"); assert_eq!(captured_logs[0].body, "default hint: 14");
@ -614,10 +713,12 @@ mod test {
len += "lower hex: ".write(&mut input[len..]).unwrap(); len += "lower hex: ".write(&mut input[len..]).unwrap();
len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap(); len += DisplayHint::LowerHex.write(&mut input[len..]).unwrap();
200.write(&mut input[len..]).unwrap(); len += 200.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "lower hex: c8"); assert_eq!(captured_logs[0].body, "lower hex: c8");
@ -632,10 +733,12 @@ mod test {
len += "upper hex: ".write(&mut input[len..]).unwrap(); len += "upper hex: ".write(&mut input[len..]).unwrap();
len += DisplayHint::UpperHex.write(&mut input[len..]).unwrap(); len += DisplayHint::UpperHex.write(&mut input[len..]).unwrap();
200.write(&mut input[len..]).unwrap(); len += 200.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "upper hex: C8"); assert_eq!(captured_logs[0].body, "upper hex: C8");
@ -651,10 +754,12 @@ mod test {
len += "ipv4: ".write(&mut input[len..]).unwrap(); len += "ipv4: ".write(&mut input[len..]).unwrap();
len += DisplayHint::Ipv4.write(&mut input[len..]).unwrap(); len += DisplayHint::Ipv4.write(&mut input[len..]).unwrap();
// 10.0.0.1 as u32 // 10.0.0.1 as u32
167772161u32.write(&mut input[len..]).unwrap(); len += 167772161u32.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1"); assert_eq!(captured_logs[0].body, "ipv4: 10.0.0.1");
@ -674,10 +779,12 @@ mod test {
0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x01, 0x00, 0x01,
]; ];
ipv6_arr.write(&mut input[len..]).unwrap(); len += ipv6_arr.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1"); assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
@ -696,10 +803,12 @@ mod test {
let ipv6_arr: [u16; 8] = [ let ipv6_arr: [u16; 8] = [
0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x2001, 0x0db8, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001,
]; ];
ipv6_arr.write(&mut input[len..]).unwrap(); len += ipv6_arr.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1"); assert_eq!(captured_logs[0].body, "ipv6: 2001:db8::1:1");
@ -716,10 +825,12 @@ mod test {
len += DisplayHint::LowerMac.write(&mut input[len..]).unwrap(); len += DisplayHint::LowerMac.write(&mut input[len..]).unwrap();
// 00:00:5e:00:53:af as byte array // 00:00:5e:00:53:af as byte array
let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf]; let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf];
mac_arr.write(&mut input[len..]).unwrap(); len += mac_arr.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "mac: 00:00:5e:00:53:af"); assert_eq!(captured_logs[0].body, "mac: 00:00:5e:00:53:af");
@ -736,10 +847,12 @@ mod test {
len += DisplayHint::UpperMac.write(&mut input[len..]).unwrap(); len += DisplayHint::UpperMac.write(&mut input[len..]).unwrap();
// 00:00:5E:00:53:AF as byte array // 00:00:5E:00:53:AF as byte array
let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf]; let mac_arr: [u8; 6] = [0x00, 0x00, 0x5e, 0x00, 0x53, 0xaf];
mac_arr.write(&mut input[len..]).unwrap(); len += mac_arr.write(&mut input[len..]).unwrap();
_ = len;
let logger = logger(); let logger = logger();
let _ = log_buf(&input, logger); let () = log_buf(&input, logger).unwrap();
testing_logger::validate(|captured_logs| { testing_logger::validate(|captured_logs| {
assert_eq!(captured_logs.len(), 1); assert_eq!(captured_logs.len(), 1);
assert_eq!(captured_logs[0].body, "mac: 00:00:5E:00:53:AF"); assert_eq!(captured_logs[0].body, "mac: 00:00:5E:00:53:AF");

@ -13,15 +13,14 @@ edition = "2021"
[dependencies] [dependencies]
bytes = "1" bytes = "1"
log = "0.4" log = "0.4"
object = { version = "0.30", default-features = false, features = ["read_core", "elf"] } object = { version = "0.31", default-features = false, features = ["read_core", "elf"] }
hashbrown = { version = "0.13", optional = true } hashbrown = { version = "0.13" }
thiserror-std = { package = "thiserror", version = "1" } thiserror = { version = "1", default-features = false }
thiserror-core = { version = "1", default-features = false, features = [], optional = true } core-error = { version = "0.0.0" }
[dev-dependencies] [dev-dependencies]
matches = "0.1.8" matches = "0.1.8"
rbpf = "0.1.0" rbpf = "0.1.0"
[features] [features]
default = [] std = []
no_std = ["hashbrown", "thiserror-core"]

@ -21,18 +21,20 @@ use crate::{
IntEncoding, LineInfo, Struct, Typedef, VarLinkage, IntEncoding, LineInfo, Struct, Typedef, VarLinkage,
}, },
generated::{btf_ext_header, btf_header}, generated::{btf_ext_header, btf_header},
thiserror::{self, Error},
util::{bytes_of, HashMap}, util::{bytes_of, HashMap},
Object, Object,
}; };
#[cfg(not(feature = "std"))]
use crate::std;
pub(crate) const MAX_RESOLVE_DEPTH: u8 = 32; pub(crate) const MAX_RESOLVE_DEPTH: u8 = 32;
pub(crate) const MAX_SPEC_LEN: usize = 64; pub(crate) const MAX_SPEC_LEN: usize = 64;
/// The error type returned when `BTF` operations fail. /// The error type returned when `BTF` operations fail.
#[derive(Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum BtfError { pub enum BtfError {
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
/// Error parsing file /// Error parsing file
#[error("error parsing {path}")] #[error("error parsing {path}")]
FileError { FileError {
@ -126,7 +128,7 @@ pub enum BtfError {
type_id: u32, type_id: u32,
}, },
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
/// Loading the btf failed /// Loading the btf failed
#[error("the BPF_BTF_LOAD syscall failed. Verifier output: {verifier_log}")] #[error("the BPF_BTF_LOAD syscall failed. Verifier output: {verifier_log}")]
LoadError { LoadError {
@ -232,13 +234,13 @@ impl Btf {
} }
/// Loads BTF metadata from `/sys/kernel/btf/vmlinux`. /// Loads BTF metadata from `/sys/kernel/btf/vmlinux`.
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
pub fn from_sys_fs() -> Result<Btf, BtfError> { pub fn from_sys_fs() -> Result<Btf, BtfError> {
Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()) Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default())
} }
/// Loads BTF metadata from the given `path`. /// Loads BTF metadata from the given `path`.
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
pub fn parse_file<P: AsRef<std::path::Path>>( pub fn parse_file<P: AsRef<std::path::Path>>(
path: P, path: P,
endianness: Endianness, endianness: Endianness,
@ -432,6 +434,19 @@ impl Btf {
// Sanitize DATASEC if they are not supported // Sanitize DATASEC if they are not supported
BtfType::DataSec(d) if !features.btf_datasec => { BtfType::DataSec(d) if !features.btf_datasec => {
debug!("{}: not supported. replacing with STRUCT", kind); debug!("{}: not supported. replacing with STRUCT", kind);
// STRUCT aren't allowed to have "." in their name, fixup this if needed.
let mut name_offset = t.name_offset();
let sec_name = self.string_at(name_offset)?;
let name = sec_name.to_string();
// Handle any "." characters in struct names
// Example: ".maps"
let fixed_name = name.replace('.', "_");
if fixed_name != name {
name_offset = self.add_string(fixed_name);
}
let mut members = vec![]; let mut members = vec![];
for member in d.entries.iter() { for member in d.entries.iter() {
let mt = types.type_by_id(member.btf_type).unwrap(); let mt = types.type_by_id(member.btf_type).unwrap();
@ -441,7 +456,9 @@ impl Btf {
offset: member.offset * 8, offset: member.offset * 8,
}) })
} }
types.types[i] = BtfType::Struct(Struct::new(t.name_offset(), members, 0));
types.types[i] =
BtfType::Struct(Struct::new(name_offset, members, d.entries.len() as u32));
} }
// Fixup DATASEC // Fixup DATASEC
// DATASEC sizes aren't always set by LLVM // DATASEC sizes aren't always set by LLVM
@ -514,7 +531,7 @@ impl Btf {
// Fixup FUNC_PROTO // Fixup FUNC_PROTO
BtfType::FuncProto(ty) if features.btf_func => { BtfType::FuncProto(ty) if features.btf_func => {
let mut ty = ty.clone(); let mut ty = ty.clone();
for (i, mut param) in ty.params.iter_mut().enumerate() { for (i, param) in ty.params.iter_mut().enumerate() {
if param.name_offset == 0 && param.btf_type != 0 { if param.name_offset == 0 && param.btf_type != 0 {
param.name_offset = self.add_string(format!("param{i}")); param.name_offset = self.add_string(format!("param{i}"));
} }
@ -536,23 +553,40 @@ impl Btf {
types.types[i] = enum_type; types.types[i] = enum_type;
} }
// Sanitize FUNC // Sanitize FUNC
BtfType::Func(ty) if !features.btf_func => { BtfType::Func(ty) => {
let name = self.string_at(ty.name_offset)?;
// Sanitize FUNC
if !features.btf_func {
debug!("{}: not supported. replacing with TYPEDEF", kind); debug!("{}: not supported. replacing with TYPEDEF", kind);
let typedef_type = BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type)); let typedef_type =
BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type));
types.types[i] = typedef_type; types.types[i] = typedef_type;
} } else if !features.btf_func_global
// Sanitize BTF_FUNC_GLOBAL || name == "memset"
BtfType::Func(ty) if !features.btf_func_global => { || name == "memcpy"
|| name == "memmove"
|| name == "memcmp"
{
// Sanitize BTF_FUNC_GLOBAL when not supported and ensure that
// memory builtins are marked as static. Globals are type checked
// and verified separately from their callers, while instead we
// want tracking info (eg bound checks) to be propagated to the
// memory builtins.
let mut fixed_ty = ty.clone(); let mut fixed_ty = ty.clone();
if ty.linkage() == FuncLinkage::Global { if ty.linkage() == FuncLinkage::Global {
if !features.btf_func_global {
debug!( debug!(
"{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC", "{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC",
kind kind
); );
} else {
debug!("changing FUNC {name} linkage to BTF_FUNC_STATIC");
}
fixed_ty.set_linkage(FuncLinkage::Static); fixed_ty.set_linkage(FuncLinkage::Static);
} }
types.types[i] = BtfType::Func(fixed_ty); types.types[i] = BtfType::Func(fixed_ty);
} }
}
// Sanitize FLOAT // Sanitize FLOAT
BtfType::Float(ty) if !features.btf_float => { BtfType::Float(ty) if !features.btf_float => {
debug!("{}: not supported. replacing with STRUCT", kind); debug!("{}: not supported. replacing with STRUCT", kind);
@ -1116,7 +1150,7 @@ mod tests {
VarLinkage::Static, VarLinkage::Static,
))); )));
let name_offset = btf.add_string(".data".to_string()); let name_offset = btf.add_string("data".to_string());
let variables = vec![DataSecEntry { let variables = vec![DataSecEntry {
btf_type: var_type_id, btf_type: var_type_id,
offset: 0, offset: 0,
@ -1350,6 +1384,60 @@ mod tests {
Btf::parse(&raw, Endianness::default()).unwrap(); Btf::parse(&raw, Endianness::default()).unwrap();
} }
#[test]
fn test_sanitize_mem_builtins() {
let mut btf = Btf::new();
let name_offset = btf.add_string("int".to_string());
let int_type_id = btf.add_type(BtfType::Int(Int::new(
name_offset,
4,
IntEncoding::Signed,
0,
)));
let params = vec![
BtfParam {
name_offset: btf.add_string("a".to_string()),
btf_type: int_type_id,
},
BtfParam {
name_offset: btf.add_string("b".to_string()),
btf_type: int_type_id,
},
];
let func_proto_type_id =
btf.add_type(BtfType::FuncProto(FuncProto::new(params, int_type_id)));
let builtins = ["memset", "memcpy", "memcmp", "memmove"];
for fname in builtins {
let func_name_offset = btf.add_string(fname.to_string());
let func_type_id = btf.add_type(BtfType::Func(Func::new(
func_name_offset,
func_proto_type_id,
FuncLinkage::Global,
)));
let features = BtfFeatures {
btf_func: true,
btf_func_global: true, // to force function name check
..Default::default()
};
btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features)
.unwrap();
if let BtfType::Func(fixed) = btf.type_by_id(func_type_id).unwrap() {
assert!(fixed.linkage() == FuncLinkage::Static);
} else {
panic!("not a func")
}
// Ensure we can convert to bytes and back again
let raw = btf.to_bytes();
Btf::parse(&raw, Endianness::default()).unwrap();
}
}
#[test] #[test]
fn test_sanitize_float() { fn test_sanitize_float() {
let mut btf = Btf::new(); let mut btf = Btf::new();
@ -1442,7 +1530,7 @@ mod tests {
} }
#[test] #[test]
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
#[cfg_attr(miri, ignore)] #[cfg_attr(miri, ignore)]
fn test_read_btf_from_sys_fs() { fn test_read_btf_from_sys_fs() {
let btf = Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()).unwrap(); let btf = Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default()).unwrap();

@ -17,13 +17,15 @@ use crate::{
bpf_core_relo, bpf_core_relo_kind::*, bpf_insn, BPF_ALU, BPF_ALU64, BPF_B, BPF_DW, BPF_H, bpf_core_relo, bpf_core_relo_kind::*, bpf_insn, BPF_ALU, BPF_ALU64, BPF_B, BPF_DW, BPF_H,
BPF_K, BPF_LD, BPF_LDX, BPF_ST, BPF_STX, BPF_W, BTF_INT_SIGNED, BPF_K, BPF_LD, BPF_LDX, BPF_ST, BPF_STX, BPF_W, BTF_INT_SIGNED,
}, },
thiserror::{self, Error},
util::HashMap, util::HashMap,
Object, Program, ProgramSection, Object, Program, ProgramSection,
}; };
#[cfg(not(feature = "std"))]
use crate::std;
/// The error type returned by [`Object::relocate_btf`]. /// The error type returned by [`Object::relocate_btf`].
#[derive(Error, Debug)] #[derive(thiserror::Error, Debug)]
#[error("error relocating `{section}`")] #[error("error relocating `{section}`")]
pub struct BtfRelocationError { pub struct BtfRelocationError {
/// The function name /// The function name
@ -34,9 +36,9 @@ pub struct BtfRelocationError {
} }
/// Relocation failures /// Relocation failures
#[derive(Error, Debug)] #[derive(thiserror::Error, Debug)]
enum RelocationError { enum RelocationError {
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
/// I/O error /// I/O error
#[error(transparent)] #[error(transparent)]
IOError(#[from] std::io::Error), IOError(#[from] std::io::Error),
@ -853,7 +855,7 @@ impl ComputedRelocation {
let instructions = &mut program.function.instructions; let instructions = &mut program.function.instructions;
let num_instructions = instructions.len(); let num_instructions = instructions.len();
let ins_index = rel.ins_offset / mem::size_of::<bpf_insn>(); let ins_index = rel.ins_offset / mem::size_of::<bpf_insn>();
let mut ins = let ins =
instructions instructions
.get_mut(ins_index) .get_mut(ins_index)
.ok_or(RelocationError::InvalidInstructionIndex { .ok_or(RelocationError::InvalidInstructionIndex {
@ -932,7 +934,7 @@ impl ComputedRelocation {
} }
BPF_LD => { BPF_LD => {
ins.imm = target_value as i32; ins.imm = target_value as i32;
let mut next_ins = instructions.get_mut(ins_index + 1).ok_or( let next_ins = instructions.get_mut(ins_index + 1).ok_or(
RelocationError::InvalidInstructionIndex { RelocationError::InvalidInstructionIndex {
index: ins_index + 1, index: ins_index + 1,
num_instructions, num_instructions,

@ -63,16 +63,17 @@
#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(clippy::all, missing_docs)] #![deny(clippy::all, missing_docs)]
#![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)] #![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)]
#![cfg_attr(feature = "no_std", feature(error_in_core))]
#[cfg(feature = "no_std")]
pub(crate) use thiserror_core as thiserror;
#[cfg(not(feature = "no_std"))]
pub(crate) use thiserror_std as thiserror;
extern crate alloc; extern crate alloc;
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
extern crate std; extern crate std;
#[cfg(not(feature = "std"))]
mod std {
pub mod error {
pub use core_error::Error;
}
pub use core::*;
}
pub mod btf; pub mod btf;
pub mod generated; pub mod generated;

@ -2,12 +2,12 @@
use core::mem; use core::mem;
use crate::{ use crate::BpfSectionKind;
thiserror::{self, Error},
BpfSectionKind,
};
use alloc::vec::Vec; use alloc::vec::Vec;
#[cfg(not(feature = "std"))]
use crate::std;
/// Invalid map type encontered /// Invalid map type encontered
pub struct InvalidMapTypeError { pub struct InvalidMapTypeError {
/// The map type /// The map type
@ -94,7 +94,7 @@ pub enum PinningType {
} }
/// The error type returned when failing to parse a [PinningType] /// The error type returned when failing to parse a [PinningType]
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
pub enum PinningError { pub enum PinningError {
/// Unsupported pinning type /// Unsupported pinning type
#[error("unsupported pinning type `{pinning_type}`")] #[error("unsupported pinning type `{pinning_type}`")]

@ -15,12 +15,16 @@ use object::{
}; };
use crate::{ use crate::{
btf::BtfFeatures,
generated::{BPF_CALL, BPF_JMP, BPF_K},
maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE}, maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE},
relocation::*, relocation::*,
thiserror::{self, Error},
util::HashMap, util::HashMap,
}; };
#[cfg(not(feature = "std"))]
use crate::std;
use crate::{ use crate::{
btf::{Btf, BtfError, BtfExt, BtfType}, btf::{Btf, BtfError, BtfExt, BtfType},
generated::{bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_F_RDONLY_PROG}, generated::{bpf_insn, bpf_map_info, bpf_map_type::BPF_MAP_TYPE_ARRAY, BPF_F_RDONLY_PROG},
@ -33,6 +37,17 @@ use crate::btf::{Array, DataSecEntry, FuncSecInfo, LineSecInfo};
const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE; const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE;
/// Features implements BPF and BTF feature detection
#[derive(Default, Debug)]
#[allow(missing_docs)]
pub struct Features {
pub bpf_name: bool,
pub bpf_probe_read_kernel: bool,
pub bpf_perf_link: bool,
pub bpf_global_data: bool,
pub btf: Option<BtfFeatures>,
}
/// The loaded object file representation /// The loaded object file representation
#[derive(Clone)] #[derive(Clone)]
pub struct Object { pub struct Object {
@ -147,7 +162,7 @@ pub struct Function {
/// - `uprobe.s+` or `uretprobe.s+` /// - `uprobe.s+` or `uretprobe.s+`
/// - `usdt+` /// - `usdt+`
/// - `kprobe.multi+` or `kretprobe.multi+`: `BPF_TRACE_KPROBE_MULTI` /// - `kprobe.multi+` or `kretprobe.multi+`: `BPF_TRACE_KPROBE_MULTI`
/// - `lsm_cgroup+` or `lsm.s+` /// - `lsm_cgroup+`
/// - `lwt_in`, `lwt_out`, `lwt_seg6local`, `lwt_xmit` /// - `lwt_in`, `lwt_out`, `lwt_seg6local`, `lwt_xmit`
/// - `raw_tp.w+`, `raw_tracepoint.w+` /// - `raw_tp.w+`, `raw_tracepoint.w+`
/// - `action` /// - `action`
@ -182,7 +197,7 @@ pub enum ProgramSection {
}, },
Xdp { Xdp {
name: String, name: String,
frags_supported: bool, frags: bool,
}, },
SkMsg { SkMsg {
name: String, name: String,
@ -230,6 +245,7 @@ pub enum ProgramSection {
}, },
Lsm { Lsm {
name: String, name: String,
sleepable: bool,
}, },
BtfTracePoint { BtfTracePoint {
name: String, name: String,
@ -280,7 +296,7 @@ impl ProgramSection {
ProgramSection::LircMode2 { name } => name, ProgramSection::LircMode2 { name } => name,
ProgramSection::PerfEvent { name } => name, ProgramSection::PerfEvent { name } => name,
ProgramSection::RawTracePoint { name } => name, ProgramSection::RawTracePoint { name } => name,
ProgramSection::Lsm { name } => name, ProgramSection::Lsm { name, .. } => name,
ProgramSection::BtfTracePoint { name } => name, ProgramSection::BtfTracePoint { name } => name,
ProgramSection::FEntry { name } => name, ProgramSection::FEntry { name } => name,
ProgramSection::FExit { name } => name, ProgramSection::FExit { name } => name,
@ -312,14 +328,8 @@ impl FromStr for ProgramSection {
"kretprobe" => KRetProbe { name }, "kretprobe" => KRetProbe { name },
"uprobe" => UProbe { name }, "uprobe" => UProbe { name },
"uretprobe" => URetProbe { name }, "uretprobe" => URetProbe { name },
"xdp" => Xdp { "xdp" => Xdp { name, frags: false },
name, "xdp.frags" => Xdp { name, frags: true },
frags_supported: false,
},
"xdp.frags" => Xdp {
name,
frags_supported: true,
},
"tp_btf" => BtfTracePoint { name }, "tp_btf" => BtfTracePoint { name },
_ if kind.starts_with("tracepoint") || kind.starts_with("tp") => { _ if kind.starts_with("tracepoint") || kind.starts_with("tp") => {
// tracepoint sections are named `tracepoint/category/event_name`, // tracepoint sections are named `tracepoint/category/event_name`,
@ -471,7 +481,14 @@ impl FromStr for ProgramSection {
"lirc_mode2" => LircMode2 { name }, "lirc_mode2" => LircMode2 { name },
"perf_event" => PerfEvent { name }, "perf_event" => PerfEvent { name },
"raw_tp" | "raw_tracepoint" => RawTracePoint { name }, "raw_tp" | "raw_tracepoint" => RawTracePoint { name },
"lsm" => Lsm { name }, "lsm" => Lsm {
name,
sleepable: false,
},
"lsm.s" => Lsm {
name,
sleepable: true,
},
"fentry" => FEntry { name }, "fentry" => FEntry { name },
"fexit" => FExit { name }, "fexit" => FExit { name },
"freplace" => Extension { name }, "freplace" => Extension { name },
@ -878,6 +895,52 @@ impl Object {
Ok(()) Ok(())
} }
/// Sanitize BPF programs.
pub fn sanitize_programs(&mut self, features: &Features) {
for program in self.programs.values_mut() {
program.sanitize(features);
}
}
}
fn insn_is_helper_call(ins: &bpf_insn) -> bool {
let klass = (ins.code & 0x07) as u32;
let op = (ins.code & 0xF0) as u32;
let src = (ins.code & 0x08) as u32;
klass == BPF_JMP && op == BPF_CALL && src == BPF_K && ins.src_reg() == 0 && ins.dst_reg() == 0
}
const BPF_FUNC_PROBE_READ: i32 = 4;
const BPF_FUNC_PROBE_READ_STR: i32 = 45;
const BPF_FUNC_PROBE_READ_USER: i32 = 112;
const BPF_FUNC_PROBE_READ_KERNEL: i32 = 113;
const BPF_FUNC_PROBE_READ_USER_STR: i32 = 114;
const BPF_FUNC_PROBE_READ_KERNEL_STR: i32 = 115;
impl Program {
fn sanitize(&mut self, features: &Features) {
for inst in &mut self.function.instructions {
if !insn_is_helper_call(inst) {
continue;
}
match inst.imm {
BPF_FUNC_PROBE_READ_USER | BPF_FUNC_PROBE_READ_KERNEL
if !features.bpf_probe_read_kernel =>
{
inst.imm = BPF_FUNC_PROBE_READ;
}
BPF_FUNC_PROBE_READ_USER_STR | BPF_FUNC_PROBE_READ_KERNEL_STR
if !features.bpf_probe_read_kernel =>
{
inst.imm = BPF_FUNC_PROBE_READ_STR;
}
_ => {}
}
}
}
} }
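The helper-call check above relies on the standard eBPF instruction encoding: the low three bits of `code` hold the instruction class, bit 3 selects an immediate versus a register operand, and the high nibble holds the operation. Below is a self-contained sketch of the same decomposition using locally defined constants rather than the generated bindings (names are local stand-ins, not aya-obj APIs).

// Local copies of the relevant encoding constants (BPF_JMP = 0x05,
// BPF_CALL = 0x80, BPF_K = 0x00); the real code uses the generated bindings.
const BPF_JMP: u8 = 0x05;
const BPF_CALL: u8 = 0x80;
const BPF_K: u8 = 0x00;

fn looks_like_helper_call(code: u8, src_reg: u8, dst_reg: u8) -> bool {
    (code & 0x07) == BPF_JMP         // instruction class: jump
        && (code & 0xf0) == BPF_CALL // operation: call
        && (code & 0x08) == BPF_K    // immediate operand (the helper id lives in imm)
        && src_reg == 0
        && dst_reg == 0
}

fn main() {
    // 0x85 == BPF_JMP | BPF_CALL | BPF_K, i.e. a plain `call <helper>` instruction.
    assert!(looks_like_helper_call(0x85, 0, 0));
    // A bpf-to-bpf call sets src_reg to BPF_PSEUDO_CALL (1) and is excluded.
    assert!(!looks_like_helper_call(0x85, 1, 0));
}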
// Parses multiple map definitions contained in a single `maps` section (which is // Parses multiple map definitions contained in a single `maps` section (which is
@ -920,7 +983,7 @@ fn parse_maps_section<'a, I: Iterator<Item = &'a Symbol>>(
} }
/// Errors caught during parsing the object file /// Errors caught during parsing the object file
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
#[allow(missing_docs)] #[allow(missing_docs)]
pub enum ParseError { pub enum ParseError {
#[error("error parsing ELF data")] #[error("error parsing ELF data")]
@ -1031,11 +1094,12 @@ impl BpfSectionKind {
BpfSectionKind::BtfMaps BpfSectionKind::BtfMaps
} else if name.starts_with(".text") { } else if name.starts_with(".text") {
BpfSectionKind::Text BpfSectionKind::Text
} else if name.starts_with(".bss") } else if name.starts_with(".bss") {
|| name.starts_with(".data") BpfSectionKind::Bss
|| name.starts_with(".rodata") } else if name.starts_with(".data") {
{
BpfSectionKind::Data BpfSectionKind::Data
} else if name.starts_with(".rodata") {
BpfSectionKind::Rodata
} else if name == ".BTF" { } else if name == ".BTF" {
BpfSectionKind::Btf BpfSectionKind::Btf
} else if name == ".BTF.ext" { } else if name == ".BTF.ext" {
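Splitting `.bss`, `.data` and `.rodata` into distinct section kinds matters because the loader later uses the kind to skip global-data maps on kernels without global-data support (see the `bpf_global_data` check in the BpfLoader hunk further down). A standalone sketch of the same prefix matching follows, with a local enum standing in for `BpfSectionKind` (this is illustrative only, not the aya-obj type).

#[derive(Debug, PartialEq)]
enum Kind { Bss, Data, Rodata, Other }

// Mirrors the ordering above: each global-data section name is matched by prefix.
fn kind_for(name: &str) -> Kind {
    if name.starts_with(".bss") {
        Kind::Bss
    } else if name.starts_with(".data") {
        Kind::Data
    } else if name.starts_with(".rodata") {
        Kind::Rodata
    } else {
        Kind::Other
    }
}

fn main() {
    assert_eq!(kind_for(".bss"), Kind::Bss);
    assert_eq!(kind_for(".data.counters"), Kind::Data);
    assert_eq!(kind_for(".rodata.str1.1"), Kind::Rodata);
}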
@ -1816,7 +1880,7 @@ mod tests {
assert_matches!( assert_matches!(
obj.programs.get("foo"), obj.programs.get("foo"),
Some(Program { Some(Program {
section: ProgramSection::Xdp { .. }, section: ProgramSection::Xdp { frags: false, .. },
.. ..
}) })
); );
@ -1837,10 +1901,7 @@ mod tests {
assert_matches!( assert_matches!(
obj.programs.get("foo"), obj.programs.get("foo"),
Some(Program { Some(Program {
section: ProgramSection::Xdp { section: ProgramSection::Xdp { frags: true, .. },
frags_supported: true,
..
},
.. ..
}) })
); );
@ -1898,7 +1959,34 @@ mod tests {
assert_matches!( assert_matches!(
obj.programs.get("foo"), obj.programs.get("foo"),
Some(Program { Some(Program {
section: ProgramSection::Lsm { .. }, section: ProgramSection::Lsm {
sleepable: false,
..
},
..
})
);
}
#[test]
fn test_parse_section_lsm_sleepable() {
let mut obj = fake_obj();
assert_matches!(
obj.parse_section(fake_section(
BpfSectionKind::Program,
"lsm.s/foo",
bytes_of(&fake_ins())
)),
Ok(())
);
assert_matches!(
obj.programs.get("foo"),
Some(Program {
section: ProgramSection::Lsm {
sleepable: true,
..
},
.. ..
}) })
); );

@ -1,10 +1,10 @@
//! Cgroup socket programs. //! Cgroup socket programs.
use alloc::{borrow::ToOwned, string::String}; use alloc::{borrow::ToOwned, string::String};
use crate::{ use crate::generated::bpf_attach_type;
generated::bpf_attach_type,
thiserror::{self, Error}, #[cfg(not(feature = "std"))]
}; use crate::std;
/// Defines where to attach a `CgroupSock` program. /// Defines where to attach a `CgroupSock` program.
#[derive(Copy, Clone, Debug, Default)] #[derive(Copy, Clone, Debug, Default)]
@ -31,7 +31,7 @@ impl From<CgroupSockAttachType> for bpf_attach_type {
} }
} }
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK program")] #[error("{0} is not a valid attach type for a CGROUP_SOCK program")]
pub(crate) struct InvalidAttachType(String); pub(crate) struct InvalidAttachType(String);

@ -1,10 +1,10 @@
//! Cgroup socket address programs. //! Cgroup socket address programs.
use alloc::{borrow::ToOwned, string::String}; use alloc::{borrow::ToOwned, string::String};
use crate::{ use crate::generated::bpf_attach_type;
generated::bpf_attach_type,
thiserror::{self, Error}, #[cfg(not(feature = "std"))]
}; use crate::std;
/// Defines where to attach a `CgroupSockAddr` program. /// Defines where to attach a `CgroupSockAddr` program.
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
@ -54,7 +54,7 @@ impl From<CgroupSockAddrAttachType> for bpf_attach_type {
} }
} }
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK_ADDR program")] #[error("{0} is not a valid attach type for a CGROUP_SOCK_ADDR program")]
pub(crate) struct InvalidAttachType(String); pub(crate) struct InvalidAttachType(String);

@ -1,10 +1,10 @@
//! Cgroup socket option programs. //! Cgroup socket option programs.
use alloc::{borrow::ToOwned, string::String}; use alloc::{borrow::ToOwned, string::String};
use crate::{ use crate::generated::bpf_attach_type;
generated::bpf_attach_type,
thiserror::{self, Error}, #[cfg(not(feature = "std"))]
}; use crate::std;
/// Defines where to attach a `CgroupSockopt` program. /// Defines where to attach a `CgroupSockopt` program.
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
@ -24,7 +24,7 @@ impl From<CgroupSockoptAttachType> for bpf_attach_type {
} }
} }
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCKOPT program")] #[error("{0} is not a valid attach type for a CGROUP_SOCKOPT program")]
pub(crate) struct InvalidAttachType(String); pub(crate) struct InvalidAttachType(String);

@ -1,7 +1,6 @@
//! Program relocation handling. //! Program relocation handling.
use core::mem; use core::mem;
use std::collections::HashSet;
use alloc::{borrow::ToOwned, string::String}; use alloc::{borrow::ToOwned, string::String};
use log::debug; use log::debug;
@ -14,15 +13,17 @@ use crate::{
}, },
maps::Map, maps::Map,
obj::{Function, Object, Program}, obj::{Function, Object, Program},
thiserror::{self, Error}, util::{HashMap, HashSet},
util::HashMap,
BpfSectionKind, BpfSectionKind,
}; };
#[cfg(not(feature = "std"))]
use crate::std;
pub(crate) const INS_SIZE: usize = mem::size_of::<bpf_insn>(); pub(crate) const INS_SIZE: usize = mem::size_of::<bpf_insn>();
/// The error type returned by [`Object::relocate_maps`] and [`Object::relocate_calls`] /// The error type returned by [`Object::relocate_maps`] and [`Object::relocate_calls`]
#[derive(Error, Debug)] #[derive(thiserror::Error, Debug)]
#[error("error relocating `{function}`")] #[error("error relocating `{function}`")]
pub struct BpfRelocationError { pub struct BpfRelocationError {
/// The function name /// The function name
@ -33,7 +34,7 @@ pub struct BpfRelocationError {
} }
/// Relocation failures /// Relocation failures
#[derive(Debug, Error)] #[derive(Debug, thiserror::Error)]
pub enum RelocationError { pub enum RelocationError {
/// Unknown symbol /// Unknown symbol
#[error("unknown symbol, index `{index}`")] #[error("unknown symbol, index `{index}`")]
@ -428,7 +429,7 @@ impl<'a> FunctionLinker<'a> {
let callee_ins_index = self.link_function(program, callee)? as i32; let callee_ins_index = self.link_function(program, callee)? as i32;
let mut ins = &mut program.instructions[ins_index]; let ins = &mut program.instructions[ins_index];
let ins_index = ins_index as i32; let ins_index = ins_index as i32;
ins.imm = callee_ins_index - ins_index - 1; ins.imm = callee_ins_index - ins_index - 1;
debug!( debug!(
@ -589,8 +590,6 @@ mod test {
assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8);
assert_eq!(fun.instructions[0].imm, 1); assert_eq!(fun.instructions[0].imm, 1);
mem::forget(map);
} }
#[test] #[test]
@ -650,9 +649,6 @@ mod test {
assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8);
assert_eq!(fun.instructions[1].imm, 2); assert_eq!(fun.instructions[1].imm, 2);
mem::forget(map_1);
mem::forget(map_2);
} }
#[test] #[test]
@ -689,8 +685,6 @@ mod test {
assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[0].src_reg(), BPF_PSEUDO_MAP_FD as u8);
assert_eq!(fun.instructions[0].imm, 1); assert_eq!(fun.instructions[0].imm, 1);
mem::forget(map);
} }
#[test] #[test]
@ -750,8 +744,5 @@ mod test {
assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8); assert_eq!(fun.instructions[1].src_reg(), BPF_PSEUDO_MAP_FD as u8);
assert_eq!(fun.instructions[1].imm, 2); assert_eq!(fun.instructions[1].imm, 2);
mem::forget(map_1);
mem::forget(map_2);
} }
} }

@ -1,10 +1,15 @@
use core::{mem, slice}; use core::{mem, slice};
#[cfg(feature = "no_std")] #[cfg(not(feature = "std"))]
pub(crate) use hashbrown::HashMap; pub(crate) use hashbrown::HashMap;
#[cfg(not(feature = "no_std"))] #[cfg(feature = "std")]
pub(crate) use std::collections::HashMap; pub(crate) use std::collections::HashMap;
#[cfg(not(feature = "std"))]
pub(crate) use hashbrown::HashSet;
#[cfg(feature = "std")]
pub(crate) use std::collections::HashSet;
/// bytes_of converts a `T` to a byte slice /// bytes_of converts a `T` to a byte slice
pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] { pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] {
let size = mem::size_of::<T>(); let size = mem::size_of::<T>();

@ -5,7 +5,7 @@ authors = ["Alessandro Decina <alessandro.d@gmail.com>"]
edition = "2021" edition = "2021"
[dependencies] [dependencies]
bindgen = "0.64" bindgen = "0.65"
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
anyhow = "1" anyhow = "1"
thiserror = "1" thiserror = "1"

@ -12,10 +12,10 @@ edition = "2021"
[dependencies] [dependencies]
libc = { version = "0.2.105" } libc = { version = "0.2.105" }
aya-obj = { path = "../aya-obj", version = "0.1.0" } aya-obj = { path = "../aya-obj", version = "0.1.0", features = ["std"] }
thiserror = "1" thiserror = "1"
object = { version = "0.30", default-features = false, features = ["std", "read_core", "elf"] } object = { version = "0.31", default-features = false, features = ["std", "read_core", "elf"] }
bitflags = "1.2.1" bitflags = "2.2.1"
bytes = "1" bytes = "1"
lazy_static = "1" lazy_static = "1"
parking_lot = { version = "0.12.0", features = ["send_guard"] } parking_lot = { version = "0.12.0", features = ["send_guard"] }

@ -9,9 +9,9 @@ use std::{
use aya_obj::{ use aya_obj::{
btf::{BtfFeatures, BtfRelocationError}, btf::{BtfFeatures, BtfRelocationError},
generated::BPF_F_XDP_HAS_FRAGS, generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS},
relocation::BpfRelocationError, relocation::BpfRelocationError,
BpfSectionKind, BpfSectionKind, Features,
}; };
use log::debug; use log::debug;
use thiserror::Error; use thiserror::Error;
@ -33,9 +33,10 @@ use crate::{
SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp, SkMsg, SkSkb, SkSkbKind, SockOps, SocketFilter, TracePoint, UProbe, Xdp,
}, },
sys::{ sys::{
bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_btf_datasec_supported, bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_bpf_global_data_supported,
is_btf_decl_tag_supported, is_btf_float_supported, is_btf_func_global_supported, is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_float_supported,
is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported, is_btf_func_global_supported, is_btf_func_supported, is_btf_supported,
is_btf_type_tag_supported, is_perf_link_supported, is_probe_read_kernel_supported,
is_prog_name_supported, retry_with_verifier_logs, is_prog_name_supported, retry_with_verifier_logs,
}, },
util::{bytes_of, bytes_of_slice, possible_cpus, VerifierLog, POSSIBLE_CPUS}, util::{bytes_of, bytes_of_slice, possible_cpus, VerifierLog, POSSIBLE_CPUS},
@ -66,19 +67,10 @@ unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
pub use aya_obj::maps::{bpf_map_def, PinningType}; pub use aya_obj::maps::{bpf_map_def, PinningType};
lazy_static! { lazy_static! {
pub(crate) static ref FEATURES: Features = Features::new(); pub(crate) static ref FEATURES: Features = detect_features();
} }
// Features implements BPF and BTF feature detection fn detect_features() -> Features {
#[derive(Default, Debug)]
pub(crate) struct Features {
pub bpf_name: bool,
pub bpf_perf_link: bool,
pub btf: Option<BtfFeatures>,
}
impl Features {
fn new() -> Self {
let btf = if is_btf_supported() { let btf = if is_btf_supported() {
Some(BtfFeatures { Some(BtfFeatures {
btf_func: is_btf_func_supported(), btf_func: is_btf_func_supported(),
@ -93,12 +85,13 @@ impl Features {
}; };
let f = Features { let f = Features {
bpf_name: is_prog_name_supported(), bpf_name: is_prog_name_supported(),
bpf_probe_read_kernel: is_probe_read_kernel_supported(),
bpf_perf_link: is_perf_link_supported(), bpf_perf_link: is_perf_link_supported(),
bpf_global_data: is_bpf_global_data_supported(),
btf, btf,
}; };
debug!("BPF Feature Detection: {:#?}", f); debug!("BPF Feature Detection: {:#?}", f);
f f
}
} }
/// Builder style API for advanced loading of eBPF programs. /// Builder style API for advanced loading of eBPF programs.
@ -134,13 +127,14 @@ pub struct BpfLoader<'a> {
bitflags! { bitflags! {
/// Used to set the verifier log level flags in [BpfLoader](BpfLoader::verifier_log_level()). /// Used to set the verifier log level flags in [BpfLoader](BpfLoader::verifier_log_level()).
#[derive(Debug)]
pub struct VerifierLogLevel: u32 { pub struct VerifierLogLevel: u32 {
/// Sets no verifier logging. /// Sets no verifier logging.
const DISABLE = 0; const DISABLE = 0;
/// Enables debug verifier logging. /// Enables debug verifier logging.
const DEBUG = 1; const DEBUG = 1;
/// Enables verbose verifier logging. /// Enables verbose verifier logging.
const VERBOSE = 2 | Self::DEBUG.bits; const VERBOSE = 2 | Self::DEBUG.bits();
/// Enables verifier stats. /// Enables verifier stats.
const STATS = 4; const STATS = 4;
} }
@ -148,9 +142,7 @@ bitflags! {
impl Default for VerifierLogLevel { impl Default for VerifierLogLevel {
fn default() -> Self { fn default() -> Self {
Self { Self::DEBUG | Self::STATS
bits: Self::DEBUG.bits | Self::STATS.bits,
}
} }
} }
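This is the bitflags 1 to 2 migration: the public `bits` field and struct-literal construction are gone, so flag sets are now combined with operators and read back through the `bits()` method. A small usage sketch against the values declared above; it assumes `VerifierLogLevel` is re-exported at the aya crate root like `BpfLoader` is.

use aya::VerifierLogLevel;

fn main() {
    // Default is DEBUG | STATS after this change (1 | 4).
    let level = VerifierLogLevel::default();
    assert_eq!(level.bits(), 1 | 4);
    // VERBOSE already includes DEBUG, so the combined value is 1 | 2 | 4.
    let verbose = level | VerifierLogLevel::VERBOSE;
    assert_eq!(verbose.bits(), 1 | 2 | 4);
}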
@ -348,7 +340,7 @@ impl<'a> BpfLoader<'a> {
/// # Ok::<(), aya::BpfError>(()) /// # Ok::<(), aya::BpfError>(())
/// ``` /// ```
pub fn load(&mut self, data: &[u8]) -> Result<Bpf, BpfError> { pub fn load(&mut self, data: &[u8]) -> Result<Bpf, BpfError> {
let verifier_log_level = self.verifier_log_level.bits; let verifier_log_level = self.verifier_log_level.bits();
let mut obj = Object::parse(data)?; let mut obj = Object::parse(data)?;
obj.patch_map_data(self.globals.clone())?; obj.patch_map_data(self.globals.clone())?;
@ -368,6 +360,12 @@ impl<'a> BpfLoader<'a> {
} }
let mut maps = HashMap::new(); let mut maps = HashMap::new();
for (name, mut obj) in obj.maps.drain() { for (name, mut obj) in obj.maps.drain() {
if let (false, BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata) =
(FEATURES.bpf_global_data, obj.section_kind())
{
continue;
}
match self.max_entries.get(name.as_str()) { match self.max_entries.get(name.as_str()) {
Some(size) => obj.set_max_entries(*size), Some(size) => obj.set_max_entries(*size),
None => { None => {
@ -443,6 +441,7 @@ impl<'a> BpfLoader<'a> {
&text_sections, &text_sections,
)?; )?;
obj.relocate_calls(&text_sections)?; obj.relocate_calls(&text_sections)?;
obj.sanitize_programs(&FEATURES);
let programs = obj let programs = obj
.programs .programs
@ -485,12 +484,10 @@ impl<'a> BpfLoader<'a> {
data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level),
}) })
} }
ProgramSection::Xdp { ProgramSection::Xdp { frags, .. } => {
frags_supported, ..
} => {
let mut data = let mut data =
ProgramData::new(prog_name, obj, btf_fd, verifier_log_level); ProgramData::new(prog_name, obj, btf_fd, verifier_log_level);
if *frags_supported { if *frags {
data.flags = BPF_F_XDP_HAS_FRAGS; data.flags = BPF_F_XDP_HAS_FRAGS;
} }
Program::Xdp(Xdp { data }) Program::Xdp(Xdp { data })
@ -558,9 +555,14 @@ impl<'a> BpfLoader<'a> {
data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level),
}) })
} }
ProgramSection::Lsm { .. } => Program::Lsm(Lsm { ProgramSection::Lsm { sleepable, .. } => {
data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), let mut data =
}), ProgramData::new(prog_name, obj, btf_fd, verifier_log_level);
if *sleepable {
data.flags = BPF_F_SLEEPABLE;
}
Program::Lsm(Lsm { data })
}
ProgramSection::BtfTracePoint { .. } => { ProgramSection::BtfTracePoint { .. } => {
Program::BtfTracePoint(BtfTracePoint { Program::BtfTracePoint(BtfTracePoint {
data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level), data: ProgramData::new(prog_name, obj, btf_fd, verifier_log_level),

@ -1,6 +1,5 @@
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -35,9 +34,9 @@ pub struct Array<T, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, V: Pod> Array<T, V> { impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub(crate) fn new(map: T) -> Result<Array<T, V>, MapError> { pub(crate) fn new(map: T) -> Result<Array<T, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<u32, V>(data)?; check_kv_size::<u32, V>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -52,7 +51,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
/// ///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 { pub fn len(&self) -> u32 {
self.inner.as_ref().obj.max_entries() self.inner.borrow().obj.max_entries()
} }
/// Returns the value stored at the given index. /// Returns the value stored at the given index.
@ -62,7 +61,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails. /// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> { pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> {
let data = self.inner.as_ref(); let data = self.inner.borrow();
check_bounds(data, *index)?; check_bounds(data, *index)?;
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
@ -82,7 +81,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
} }
} }
impl<T: AsMut<MapData>, V: Pod> Array<T, V> { impl<T: BorrowMut<MapData>, V: Pod> Array<T, V> {
/// Sets the value of the element at the given index. /// Sets the value of the element at the given index.
/// ///
/// # Errors /// # Errors
@ -90,7 +89,7 @@ impl<T: AsMut<MapData>, V: Pod> Array<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails. /// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> { pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
check_bounds(data, index)?; check_bounds(data, index)?;
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| { bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| {
@ -103,9 +102,9 @@ impl<T: AsMut<MapData>, V: Pod> Array<T, V> {
} }
} }
impl<T: AsRef<MapData>, V: Pod> IterableMap<u32, V> for Array<T, V> { impl<T: Borrow<MapData>, V: Pod> IterableMap<u32, V> for Array<T, V> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, index: &u32) -> Result<V, MapError> { fn get(&self, index: &u32) -> Result<V, MapError> {

@ -1,5 +1,5 @@
use std::{ use std::{
convert::{AsMut, AsRef}, borrow::{Borrow, BorrowMut},
marker::PhantomData, marker::PhantomData,
}; };
@ -53,9 +53,9 @@ pub struct PerCpuArray<T, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> { impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
pub(crate) fn new(map: T) -> Result<PerCpuArray<T, V>, MapError> { pub(crate) fn new(map: T) -> Result<PerCpuArray<T, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<u32, V>(data)?; check_kv_size::<u32, V>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -70,7 +70,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
/// ///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 { pub fn len(&self) -> u32 {
self.inner.as_ref().obj.max_entries() self.inner.borrow().obj.max_entries()
} }
/// Returns a slice of values - one for each CPU - stored at the given index. /// Returns a slice of values - one for each CPU - stored at the given index.
@ -80,7 +80,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails. /// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: &u32, flags: u64) -> Result<PerCpuValues<V>, MapError> { pub fn get(&self, index: &u32, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let data = self.inner.as_ref(); let data = self.inner.borrow();
check_bounds(data, *index)?; check_bounds(data, *index)?;
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
@ -100,7 +100,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
} }
} }
impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> { impl<T: BorrowMut<MapData>, V: Pod> PerCpuArray<T, V> {
/// Sets the values - one for each CPU - at the given index. /// Sets the values - one for each CPU - at the given index.
/// ///
/// # Errors /// # Errors
@ -108,7 +108,7 @@ impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails. /// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, values: PerCpuValues<V>, flags: u64) -> Result<(), MapError> { pub fn set(&mut self, index: u32, values: PerCpuValues<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
check_bounds(data, index)?; check_bounds(data, index)?;
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
@ -122,9 +122,9 @@ impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> {
} }
} }
impl<T: AsRef<MapData>, V: Pod> IterableMap<u32, PerCpuValues<V>> for PerCpuArray<T, V> { impl<T: Borrow<MapData>, V: Pod> IterableMap<u32, PerCpuValues<V>> for PerCpuArray<T, V> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, index: &u32) -> Result<PerCpuValues<V>, MapError> { fn get(&self, index: &u32) -> Result<PerCpuValues<V>, MapError> {

@ -1,7 +1,7 @@
//! An array of eBPF program file descriptors used as a jump table. //! An array of eBPF program file descriptors used as a jump table.
use std::{ use std::{
convert::{AsMut, AsRef}, borrow::{Borrow, BorrowMut},
os::unix::prelude::{AsRawFd, RawFd}, os::unix::prelude::{AsRawFd, RawFd},
}; };
@ -51,9 +51,9 @@ pub struct ProgramArray<T> {
inner: T, inner: T,
} }
impl<T: AsRef<MapData>> ProgramArray<T> { impl<T: Borrow<MapData>> ProgramArray<T> {
pub(crate) fn new(map: T) -> Result<ProgramArray<T>, MapError> { pub(crate) fn new(map: T) -> Result<ProgramArray<T>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?; check_kv_size::<u32, RawFd>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -64,17 +64,17 @@ impl<T: AsRef<MapData>> ProgramArray<T> {
/// An iterator over the indices of the array that point to a program. The iterator item type /// An iterator over the indices of the array that point to a program. The iterator item type
/// is `Result<u32, MapError>`. /// is `Result<u32, MapError>`.
pub fn indices(&self) -> MapKeys<'_, u32> { pub fn indices(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
} }
impl<T: AsMut<MapData>> ProgramArray<T> { impl<T: BorrowMut<MapData>> ProgramArray<T> {
/// Sets the target program file descriptor for the given index in the jump table. /// Sets the target program file descriptor for the given index in the jump table.
/// ///
/// When an eBPF program calls `bpf_tail_call(ctx, prog_array, index)`, control /// When an eBPF program calls `bpf_tail_call(ctx, prog_array, index)`, control
/// flow will jump to `program`. /// flow will jump to `program`.
pub fn set(&mut self, index: u32, program: ProgramFd, flags: u64) -> Result<(), MapError> { pub fn set(&mut self, index: u32, program: ProgramFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
check_bounds(data, index)?; check_bounds(data, index)?;
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
let prog_fd = program.as_raw_fd(); let prog_fd = program.as_raw_fd();
@ -93,9 +93,9 @@ impl<T: AsMut<MapData>> ProgramArray<T> {
/// Calling `bpf_tail_call(ctx, prog_array, index)` on an index that has been cleared returns an /// Calling `bpf_tail_call(ctx, prog_array, index)` on an index that has been cleared returns an
/// error. /// error.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
check_bounds(data, *index)?; check_bounds(data, *index)?;
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow_mut().fd_or_err()?;
bpf_map_delete_elem(fd, index) bpf_map_delete_elem(fd, index)
.map(|_| ()) .map(|_| ())

@ -1,5 +1,5 @@
//! A Bloom Filter. //! A Bloom Filter.
use std::{borrow::Borrow, convert::AsRef, marker::PhantomData}; use std::{borrow::Borrow, marker::PhantomData};
use crate::{ use crate::{
maps::{check_v_size, MapData, MapError}, maps::{check_v_size, MapData, MapError},
@ -35,9 +35,9 @@ pub struct BloomFilter<T, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> { impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
pub(crate) fn new(map: T) -> Result<BloomFilter<T, V>, MapError> { pub(crate) fn new(map: T) -> Result<BloomFilter<T, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_v_size::<V>(data)?; check_v_size::<V>(data)?;
let _ = data.fd_or_err()?; let _ = data.fd_or_err()?;
@ -50,7 +50,7 @@ impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> {
/// Query the existence of the element. /// Query the existence of the element.
pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> { pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_lookup_elem_ptr::<u32, _>(fd, None, &mut value, flags) bpf_map_lookup_elem_ptr::<u32, _>(fd, None, &mut value, flags)
.map_err(|(_, io_error)| MapError::SyscallError { .map_err(|(_, io_error)| MapError::SyscallError {
@ -63,7 +63,7 @@ impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> {
/// Inserts a value into the map. /// Inserts a value into the map.
pub fn insert(&self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> { pub fn insert(&self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_push_elem".to_owned(), call: "bpf_map_push_elem".to_owned(),

@ -1,6 +1,5 @@
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -39,9 +38,9 @@ pub struct HashMap<T, K, V> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<HashMap<T, K, V>, MapError> { pub(crate) fn new(map: T) -> Result<HashMap<T, K, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<K, V>(data)?; check_kv_size::<K, V>(data)?;
let _ = data.fd_or_err()?; let _ = data.fd_or_err()?;
@ -54,7 +53,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Returns a copy of the value associated with the key. /// Returns a copy of the value associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<V, MapError> { pub fn get(&self, key: &K, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(), call: "bpf_map_lookup_elem".to_owned(),
@ -73,11 +72,11 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element /// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`. /// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> { pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
} }
impl<T: AsMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> { impl<T: BorrowMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Inserts a key-value pair into the map. /// Inserts a key-value pair into the map.
pub fn insert( pub fn insert(
&mut self, &mut self,
@ -85,18 +84,18 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
value: impl Borrow<V>, value: impl Borrow<V>,
flags: u64, flags: u64,
) -> Result<(), MapError> { ) -> Result<(), MapError> {
hash_map::insert(self.inner.as_mut(), key.borrow(), value.borrow(), flags) hash_map::insert(self.inner.borrow_mut(), key.borrow(), value.borrow(), flags)
} }
/// Removes a key from the map. /// Removes a key from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key) hash_map::remove(self.inner.borrow_mut(), key)
} }
} }
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<K, V> for HashMap<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, V> for HashMap<T, K, V> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, key: &K) -> Result<V, MapError> { fn get(&self, key: &K) -> Result<V, MapError> {

@ -1,7 +1,6 @@
//! Per-CPU hash map. //! Per-CPU hash map.
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -48,9 +47,9 @@ pub struct PerCpuHashMap<T, K: Pod, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<PerCpuHashMap<T, K, V>, MapError> { pub(crate) fn new(map: T) -> Result<PerCpuHashMap<T, K, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<K, V>(data)?; check_kv_size::<K, V>(data)?;
let _ = data.fd_or_err()?; let _ = data.fd_or_err()?;
@ -64,7 +63,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Returns a slice of values - one for each CPU - associated with the key. /// Returns a slice of values - one for each CPU - associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<PerCpuValues<V>, MapError> { pub fn get(&self, key: &K, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let values = bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| { let values = bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(), call: "bpf_map_lookup_elem".to_owned(),
@ -83,11 +82,11 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element /// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`. /// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> { pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
} }
impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> { impl<T: BorrowMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Inserts a slice of values - one for each CPU - for the given key. /// Inserts a slice of values - one for each CPU - for the given key.
/// ///
/// # Examples /// # Examples
@ -122,7 +121,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
values: PerCpuValues<V>, values: PerCpuValues<V>,
flags: u64, flags: u64,
) -> Result<(), MapError> { ) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow_mut().fd_or_err()?;
bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err( bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err(
|(_, io_error)| MapError::SyscallError { |(_, io_error)| MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(), call: "bpf_map_update_elem".to_owned(),
@ -135,13 +134,15 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Removes a key from the map. /// Removes a key from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key) hash_map::remove(self.inner.borrow_mut(), key)
} }
} }
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<K, PerCpuValues<V>> for PerCpuHashMap<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, PerCpuValues<V>>
for PerCpuHashMap<T, K, V>
{
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, key: &K) -> Result<PerCpuValues<V>, MapError> { fn get(&self, key: &K) -> Result<PerCpuValues<V>, MapError> {

@ -1,7 +1,6 @@
//! A LPM Trie. //! A LPM Trie.
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -99,9 +98,9 @@ impl<K: Pod> Clone for Key<K> {
// A Pod impl is required as Key struct is a key for a map. // A Pod impl is required as Key struct is a key for a map.
unsafe impl<K: Pod> Pod for Key<K> {} unsafe impl<K: Pod> Pod for Key<K> {}
impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
pub(crate) fn new(map: T) -> Result<LpmTrie<T, K, V>, MapError> { pub(crate) fn new(map: T) -> Result<LpmTrie<T, K, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<Key<K>, V>(data)?; check_kv_size::<Key<K>, V>(data)?;
let _ = data.fd_or_err()?; let _ = data.fd_or_err()?;
@ -115,7 +114,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie. /// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie.
pub fn get(&self, key: &Key<K>, flags: u64) -> Result<V, MapError> { pub fn get(&self, key: &Key<K>, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(), call: "bpf_map_lookup_elem".to_owned(),
@ -134,17 +133,17 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element /// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<Key<K>, MapError>`. /// type is `Result<Key<K>, MapError>`.
pub fn keys(&self) -> MapKeys<'_, Key<K>> { pub fn keys(&self) -> MapKeys<'_, Key<K>> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
/// An iterator visiting all keys matching key. The /// An iterator visiting all keys matching key. The
/// iterator item type is `Result<Key<K>, MapError>`. /// iterator item type is `Result<Key<K>, MapError>`.
pub fn iter_key(&self, key: Key<K>) -> LpmTrieKeys<'_, K> { pub fn iter_key(&self, key: Key<K>) -> LpmTrieKeys<'_, K> {
LpmTrieKeys::new(self.inner.as_ref(), key) LpmTrieKeys::new(self.inner.borrow(), key)
} }
} }
impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> { impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Inserts a key-value pair into the map. /// Inserts a key-value pair into the map.
pub fn insert( pub fn insert(
&mut self, &mut self,
@ -152,7 +151,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
value: impl Borrow<V>, value: impl Borrow<V>,
flags: u64, flags: u64,
) -> Result<(), MapError> { ) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| { bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(), call: "bpf_map_update_elem".to_owned(),
@ -167,7 +166,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// ///
/// Both the prefix and data must match exactly - this method does not do a longest prefix match. /// Both the prefix and data must match exactly - this method does not do a longest prefix match.
pub fn remove(&mut self, key: &Key<K>) -> Result<(), MapError> { pub fn remove(&mut self, key: &Key<K>) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_delete_elem(fd, key) bpf_map_delete_elem(fd, key)
.map(|_| ()) .map(|_| ())
.map_err(|(_, io_error)| MapError::SyscallError { .map_err(|(_, io_error)| MapError::SyscallError {
@ -177,9 +176,9 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
} }
} }
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<Key<K>, V> for LpmTrie<T, K, V> { impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<Key<K>, V> for LpmTrie<T, K, V> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, key: &Key<K>) -> Result<V, MapError> { fn get(&self, key: &Key<K>) -> Result<V, MapError> {

@ -37,7 +37,6 @@
//! versa. Because of that, all map values must be plain old data and therefore //! versa. Because of that, all map values must be plain old data and therefore
//! implement the [Pod] trait. //! implement the [Pod] trait.
use std::{ use std::{
convert::{AsMut, AsRef},
ffi::CString, ffi::CString,
fmt, io, fmt, io,
marker::PhantomData, marker::PhantomData,
@ -481,18 +480,6 @@ pub struct MapData {
pub pinned: bool, pub pinned: bool,
} }
impl AsRef<MapData> for MapData {
fn as_ref(&self) -> &MapData {
self
}
}
impl AsMut<MapData> for MapData {
fn as_mut(&mut self) -> &mut MapData {
self
}
}
impl MapData { impl MapData {
/// Creates a new map with the provided `name` /// Creates a new map with the provided `name`
pub fn create(&mut self, name: &str) -> Result<RawFd, MapError> { pub fn create(&mut self, name: &str) -> Result<RawFd, MapError> {

@ -1,6 +1,6 @@
use bytes::BytesMut; use bytes::BytesMut;
use std::{ use std::{
convert::AsMut, borrow::{Borrow, BorrowMut},
os::unix::prelude::{AsRawFd, RawFd}, os::unix::prelude::{AsRawFd, RawFd},
}; };
@ -89,7 +89,7 @@ pub struct AsyncPerfEventArray<T> {
perf_map: PerfEventArray<T>, perf_map: PerfEventArray<T>,
} }
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArray<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArray<T> {
/// Opens the perf buffer at the given index. /// Opens the perf buffer at the given index.
/// ///
/// The returned buffer will receive all the events eBPF programs send at the given index. /// The returned buffer will receive all the events eBPF programs send at the given index.
@ -112,7 +112,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArray<T> {
} }
} }
impl<T: AsRef<MapData>> AsyncPerfEventArray<T> { impl<T: Borrow<MapData>> AsyncPerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<AsyncPerfEventArray<T>, MapError> { pub(crate) fn new(map: T) -> Result<AsyncPerfEventArray<T>, MapError> {
Ok(AsyncPerfEventArray { Ok(AsyncPerfEventArray {
perf_map: PerfEventArray::new(map)?, perf_map: PerfEventArray::new(map)?,
@ -138,7 +138,7 @@ pub struct AsyncPerfEventArrayBuffer<T> {
} }
#[cfg(any(feature = "async_tokio"))] #[cfg(any(feature = "async_tokio"))]
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer. /// Reads events from the buffer.
/// ///
/// This method reads events into the provided slice of buffers, filling /// This method reads events into the provided slice of buffers, filling
@ -168,7 +168,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> {
} }
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer. /// Reads events from the buffer.
/// ///
/// This method reads events into the provided slice of buffers, filling /// This method reads events into the provided slice of buffers, filling

@ -2,7 +2,7 @@
//! //!
//! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page. //! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page.
use std::{ use std::{
convert::AsMut, borrow::{Borrow, BorrowMut},
ops::Deref, ops::Deref,
os::unix::io::{AsRawFd, RawFd}, os::unix::io::{AsRawFd, RawFd},
sync::Arc, sync::Arc,
@ -31,7 +31,7 @@ pub struct PerfEventArrayBuffer<T> {
buf: PerfBuffer, buf: PerfBuffer,
} }
impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArrayBuffer<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArrayBuffer<T> {
/// Returns true if the buffer contains events that haven't been read. /// Returns true if the buffer contains events that haven't been read.
pub fn readable(&self) -> bool { pub fn readable(&self) -> bool {
self.buf.readable() self.buf.readable()
@ -55,7 +55,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArrayBuffer<T> {
} }
} }
impl<T: AsMut<MapData> + AsRef<MapData>> AsRawFd for PerfEventArrayBuffer<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
fn as_raw_fd(&self) -> RawFd { fn as_raw_fd(&self) -> RawFd {
self.buf.as_raw_fd() self.buf.as_raw_fd()
} }
@ -84,14 +84,14 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
/// ```no_run /// ```no_run
/// # use aya::maps::perf::PerfEventArrayBuffer; /// # use aya::maps::perf::PerfEventArrayBuffer;
/// # use aya::maps::MapData; /// # use aya::maps::MapData;
/// # use std::convert::AsMut; /// # use std::borrow::BorrowMut;
/// # struct Poll<T> { _t: std::marker::PhantomData<T> }; /// # struct Poll<T> { _t: std::marker::PhantomData<T> };
/// # impl<T: AsMut<MapData>> Poll<T> { /// # impl<T: BorrowMut<MapData>> Poll<T> {
/// # fn poll_readable(&self) -> &mut [PerfEventArrayBuffer<T>] { /// # fn poll_readable(&self) -> &mut [PerfEventArrayBuffer<T>] {
/// # &mut [] /// # &mut []
/// # } /// # }
/// # } /// # }
/// # fn poll_buffers<T: AsMut<MapData>>(bufs: Vec<PerfEventArrayBuffer<T>>) -> Poll<T> { /// # fn poll_buffers<T: BorrowMut<MapData>>(bufs: Vec<PerfEventArrayBuffer<T>>) -> Poll<T> {
/// # Poll { _t: std::marker::PhantomData } /// # Poll { _t: std::marker::PhantomData }
/// # } /// # }
/// # #[derive(thiserror::Error, Debug)] /// # #[derive(thiserror::Error, Debug)]
@ -160,9 +160,9 @@ pub struct PerfEventArray<T> {
page_size: usize, page_size: usize,
} }
impl<T: AsRef<MapData>> PerfEventArray<T> { impl<T: Borrow<MapData>> PerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<PerfEventArray<T>, MapError> { pub(crate) fn new(map: T) -> Result<PerfEventArray<T>, MapError> {
let _fd = map.as_ref().fd_or_err()?; let _fd = map.borrow().fd_or_err()?;
Ok(PerfEventArray { Ok(PerfEventArray {
map: Arc::new(map), map: Arc::new(map),
@ -171,7 +171,7 @@ impl<T: AsRef<MapData>> PerfEventArray<T> {
} }
} }
impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArray<T> { impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArray<T> {
/// Opens the perf buffer at the given index. /// Opens the perf buffer at the given index.
/// ///
/// The returned buffer will receive all the events eBPF programs send at the given index. /// The returned buffer will receive all the events eBPF programs send at the given index.
@ -183,7 +183,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArray<T> {
// FIXME: keep track of open buffers // FIXME: keep track of open buffers
// this cannot fail as new() checks that the fd is open // this cannot fail as new() checks that the fd is open
let map_data: &MapData = self.map.deref().as_ref(); let map_data: &MapData = self.map.deref().borrow();
let map_fd = map_data.fd_or_err().unwrap(); let map_fd = map_data.fd_or_err().unwrap();
let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?; let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?;
bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0) bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0)

@ -1,7 +1,6 @@
//! A FIFO queue. //! A FIFO queue.
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -34,9 +33,9 @@ pub struct Queue<T, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, V: Pod> Queue<T, V> { impl<T: Borrow<MapData>, V: Pod> Queue<T, V> {
pub(crate) fn new(map: T) -> Result<Queue<T, V>, MapError> { pub(crate) fn new(map: T) -> Result<Queue<T, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<(), V>(data)?; check_kv_size::<(), V>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -51,11 +50,11 @@ impl<T: AsRef<MapData>, V: Pod> Queue<T, V> {
/// ///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn capacity(&self) -> u32 { pub fn capacity(&self) -> u32 {
self.inner.as_ref().obj.max_entries() self.inner.borrow().obj.max_entries()
} }
} }
impl<T: AsMut<MapData>, V: Pod> Queue<T, V> { impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
/// Removes the first element and returns it. /// Removes the first element and returns it.
/// ///
/// # Errors /// # Errors
@ -63,7 +62,7 @@ impl<T: AsMut<MapData>, V: Pod> Queue<T, V> {
/// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`] /// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails. /// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> { pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err( let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| MapError::SyscallError { |(_, io_error)| MapError::SyscallError {
@ -80,7 +79,7 @@ impl<T: AsMut<MapData>, V: Pod> Queue<T, V> {
/// ///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> { pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_push_elem".to_owned(), call: "bpf_map_push_elem".to_owned(),

@ -1,6 +1,5 @@
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
os::unix::io::{AsRawFd, RawFd}, os::unix::io::{AsRawFd, RawFd},
}; };
@ -69,9 +68,9 @@ pub struct SockHash<T, K> {
_k: PhantomData<K>, _k: PhantomData<K>,
} }
impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> { impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
pub(crate) fn new(map: T) -> Result<SockHash<T, K>, MapError> { pub(crate) fn new(map: T) -> Result<SockHash<T, K>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<K, u32>(data)?; check_kv_size::<K, u32>(data)?;
let _ = data.fd_or_err()?; let _ = data.fd_or_err()?;
@ -83,7 +82,7 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// Returns the fd of the socket stored at the given key. /// Returns the fd of the socket stored at the given key.
pub fn get(&self, key: &K, flags: u64) -> Result<RawFd, MapError> { pub fn get(&self, key: &K, flags: u64) -> Result<RawFd, MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(), call: "bpf_map_lookup_elem".to_owned(),
@ -102,7 +101,7 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// An iterator visiting all keys in arbitrary order. The iterator element /// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`. /// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> { pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
/// Returns the map's file descriptor. /// Returns the map's file descriptor.
@ -110,11 +109,11 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// The returned file descriptor can be used to attach programs that work with /// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> { pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) Ok(SockMapFd(self.inner.borrow().fd_or_err()?))
} }
} }
impl<T: AsMut<MapData>, K: Pod> SockHash<T, K> { impl<T: BorrowMut<MapData>, K: Pod> SockHash<T, K> {
/// Inserts a socket under the given key. /// Inserts a socket under the given key.
pub fn insert<I: AsRawFd>( pub fn insert<I: AsRawFd>(
&mut self, &mut self,
@ -122,18 +121,23 @@ impl<T: AsMut<MapData>, K: Pod> SockHash<T, K> {
value: I, value: I,
flags: u64, flags: u64,
) -> Result<(), MapError> { ) -> Result<(), MapError> {
hash_map::insert(self.inner.as_mut(), key.borrow(), &value.as_raw_fd(), flags) hash_map::insert(
self.inner.borrow_mut(),
key.borrow(),
&value.as_raw_fd(),
flags,
)
} }
/// Removes a socket from the map. /// Removes a socket from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key) hash_map::remove(self.inner.borrow_mut(), key)
} }
} }
impl<T: AsRef<MapData>, K: Pod> IterableMap<K, RawFd> for SockHash<T, K> { impl<T: Borrow<MapData>, K: Pod> IterableMap<K, RawFd> for SockHash<T, K> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, key: &K) -> Result<RawFd, MapError> { fn get(&self, key: &K) -> Result<RawFd, MapError> {

@ -1,7 +1,7 @@
//! An array of eBPF program file descriptors used as a jump table. //! An array of eBPF program file descriptors used as a jump table.
use std::{ use std::{
convert::{AsMut, AsRef}, borrow::{Borrow, BorrowMut},
os::unix::{io::AsRawFd, prelude::RawFd}, os::unix::{io::AsRawFd, prelude::RawFd},
}; };
@ -44,9 +44,9 @@ pub struct SockMap<T> {
pub(crate) inner: T, pub(crate) inner: T,
} }
impl<T: AsRef<MapData>> SockMap<T> { impl<T: Borrow<MapData>> SockMap<T> {
pub(crate) fn new(map: T) -> Result<SockMap<T>, MapError> { pub(crate) fn new(map: T) -> Result<SockMap<T>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?; check_kv_size::<u32, RawFd>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -57,7 +57,7 @@ impl<T: AsRef<MapData>> SockMap<T> {
/// An iterator over the indices of the array that point to a program. The iterator item type /// An iterator over the indices of the array that point to a program. The iterator item type
/// is `Result<u32, MapError>`. /// is `Result<u32, MapError>`.
pub fn indices(&self) -> MapKeys<'_, u32> { pub fn indices(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
/// Returns the map's file descriptor. /// Returns the map's file descriptor.
@ -65,14 +65,14 @@ impl<T: AsRef<MapData>> SockMap<T> {
/// The returned file descriptor can be used to attach programs that work with /// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> { pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) Ok(SockMapFd(self.inner.borrow().fd_or_err()?))
} }
} }
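As a rough illustration of the mutable API shown further down (the map name and index are assumptions), a user-space caller might store an accepted TCP socket like this:

use std::net::TcpStream;
use aya::{maps::SockMap, Bpf};

fn store_socket(bpf: &mut Bpf, stream: &TcpStream) -> Result<(), anyhow::Error> {
    // "SOCK_MAP" is an assumed map name; any AsRawFd socket type works here.
    let mut sock_map: SockMap<_> = bpf.map_mut("SOCK_MAP").unwrap().try_into()?;
    sock_map.set(0, stream, 0)?;
    Ok(())
}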
impl<T: AsMut<MapData>> SockMap<T> { impl<T: BorrowMut<MapData>> SockMap<T> {
/// Stores a socket into the map. /// Stores a socket into the map.
pub fn set<I: AsRawFd>(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> { pub fn set<I: AsRawFd>(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
check_bounds(data, index)?; check_bounds(data, index)?;
bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err( bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err(
@ -86,7 +86,7 @@ impl<T: AsMut<MapData>> SockMap<T> {
/// Removes the socket stored at `index` from the map. /// Removes the socket stored at `index` from the map.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.as_mut(); let data = self.inner.borrow_mut();
let fd = data.fd_or_err()?; let fd = data.fd_or_err()?;
check_bounds(data, *index)?; check_bounds(data, *index)?;
bpf_map_delete_elem(fd, index) bpf_map_delete_elem(fd, index)

@ -1,7 +1,6 @@
//! A LIFO stack. //! A LIFO stack.
use std::{ use std::{
borrow::Borrow, borrow::{Borrow, BorrowMut},
convert::{AsMut, AsRef},
marker::PhantomData, marker::PhantomData,
}; };
@ -34,9 +33,9 @@ pub struct Stack<T, V: Pod> {
_v: PhantomData<V>, _v: PhantomData<V>,
} }
impl<T: AsRef<MapData>, V: Pod> Stack<T, V> { impl<T: Borrow<MapData>, V: Pod> Stack<T, V> {
pub(crate) fn new(map: T) -> Result<Stack<T, V>, MapError> { pub(crate) fn new(map: T) -> Result<Stack<T, V>, MapError> {
let data = map.as_ref(); let data = map.borrow();
check_kv_size::<(), V>(data)?; check_kv_size::<(), V>(data)?;
let _fd = data.fd_or_err()?; let _fd = data.fd_or_err()?;
@ -51,11 +50,11 @@ impl<T: AsRef<MapData>, V: Pod> Stack<T, V> {
/// ///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn capacity(&self) -> u32 { pub fn capacity(&self) -> u32 {
self.inner.as_ref().obj.max_entries() self.inner.borrow().obj.max_entries()
} }
} }
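For illustration, pushing and popping values through the mutable API that follows might look like this sketch (the map name is assumed):

use aya::{maps::Stack, Bpf};

fn push_pop(bpf: &mut Bpf) -> Result<(), anyhow::Error> {
    // "EVENTS" is an assumed map name; push accepts a value or a reference (impl Borrow<V>).
    let mut stack: Stack<_, u64> = bpf.map_mut("EVENTS").unwrap().try_into()?;
    stack.push(42u64, 0)?;
    let value = stack.pop(0)?;
    assert_eq!(value, 42);
    Ok(())
}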
impl<T: AsMut<MapData>, V: Pod> Stack<T, V> { impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
/// Removes the last element and returns it. /// Removes the last element and returns it.
/// ///
/// # Errors /// # Errors
@ -63,7 +62,7 @@ impl<T: AsMut<MapData>, V: Pod> Stack<T, V> {
/// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`] /// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails. /// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> { pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err( let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| MapError::SyscallError { |(_, io_error)| MapError::SyscallError {
@ -80,7 +79,7 @@ impl<T: AsMut<MapData>, V: Pod> Stack<T, V> {
/// ///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> { pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| { bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError { MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(), call: "bpf_map_update_elem".to_owned(),

@ -1,7 +1,7 @@
//! A hash map of kernel or user space stack traces. //! A hash map of kernel or user space stack traces.
//! //!
//! See [`StackTraceMap`] for documentation and examples. //! See [`StackTraceMap`] for documentation and examples.
use std::{collections::BTreeMap, convert::AsRef, fs, io, mem, path::Path, str::FromStr}; use std::{borrow::Borrow, collections::BTreeMap, fs, io, mem, path::Path, str::FromStr};
use crate::{ use crate::{
maps::{IterableMap, MapData, MapError, MapIter, MapKeys}, maps::{IterableMap, MapData, MapError, MapIter, MapKeys},
@ -67,9 +67,9 @@ pub struct StackTraceMap<T> {
max_stack_depth: usize, max_stack_depth: usize,
} }
impl<T: AsRef<MapData>> StackTraceMap<T> { impl<T: Borrow<MapData>> StackTraceMap<T> {
pub(crate) fn new(map: T) -> Result<StackTraceMap<T>, MapError> { pub(crate) fn new(map: T) -> Result<StackTraceMap<T>, MapError> {
let data = map.as_ref(); let data = map.borrow();
let expected = mem::size_of::<u32>(); let expected = mem::size_of::<u32>();
let size = data.obj.key_size() as usize; let size = data.obj.key_size() as usize;
if size != expected { if size != expected {
@ -102,7 +102,7 @@ impl<T: AsRef<MapData>> StackTraceMap<T> {
/// Returns [`MapError::KeyNotFound`] if there is no stack trace with the /// Returns [`MapError::KeyNotFound`] if there is no stack trace with the
/// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails. /// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, stack_id: &u32, flags: u64) -> Result<StackTrace, MapError> { pub fn get(&self, stack_id: &u32, flags: u64) -> Result<StackTrace, MapError> {
let fd = self.inner.as_ref().fd_or_err()?; let fd = self.inner.borrow().fd_or_err()?;
let mut frames = vec![0; self.max_stack_depth]; let mut frames = vec![0; self.max_stack_depth];
bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags) bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags)
@ -136,13 +136,13 @@ impl<T: AsRef<MapData>> StackTraceMap<T> {
/// An iterator visiting all the stack_ids in arbitrary order. The iterator element /// An iterator visiting all the stack_ids in arbitrary order. The iterator element
/// type is `Result<u32, MapError>`. /// type is `Result<u32, MapError>`.
pub fn stack_ids(&self) -> MapKeys<'_, u32> { pub fn stack_ids(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref()) MapKeys::new(self.inner.borrow())
} }
} }
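A rough sketch of reading a captured trace from user space; the map name and the origin of `stack_id` are assumptions:

use aya::{maps::StackTraceMap, Bpf};

fn print_trace(bpf: &Bpf, stack_id: u32) -> Result<(), anyhow::Error> {
    let traces: StackTraceMap<_> = bpf.map("STACK_TRACES").unwrap().try_into()?;
    let trace = traces.get(&stack_id, 0)?;
    for frame in trace.frames() {
        // Instruction pointers only; symbol resolution is a separate step.
        println!("ip: {:#x}", frame.ip);
    }
    Ok(())
}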
impl<T: AsRef<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> { impl<T: Borrow<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> {
fn map(&self) -> &MapData { fn map(&self) -> &MapData {
self.inner.as_ref() self.inner.borrow()
} }
fn get(&self, index: &u32) -> Result<StackTrace, MapError> { fn get(&self, index: &u32) -> Result<StackTrace, MapError> {
@ -150,7 +150,7 @@ impl<T: AsRef<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> {
} }
} }
impl<'a, T: AsRef<MapData>> IntoIterator for &'a StackTraceMap<T> { impl<'a, T: Borrow<MapData>> IntoIterator for &'a StackTraceMap<T> {
type Item = Result<(u32, StackTrace), MapError>; type Item = Result<(u32, StackTrace), MapError>;
type IntoIter = MapIter<'a, u32, StackTrace, StackTraceMap<T>>; type IntoIter = MapIter<'a, u32, StackTrace, StackTraceMap<T>>;

@ -35,7 +35,7 @@ pub enum XdpError {
bitflags! { bitflags! {
/// Flags passed to [`Xdp::attach()`]. /// Flags passed to [`Xdp::attach()`].
#[derive(Default)] #[derive(Clone, Copy, Debug, Default)]
pub struct XdpFlags: u32 { pub struct XdpFlags: u32 {
/// Skb mode. /// Skb mode.
const SKB_MODE = XDP_FLAGS_SKB_MODE; const SKB_MODE = XDP_FLAGS_SKB_MODE;
@ -128,7 +128,7 @@ impl Xdp {
let k_ver = kernel_version().unwrap(); let k_ver = kernel_version().unwrap();
if k_ver >= (5, 9, 0) { if k_ver >= (5, 9, 0) {
let link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, None, flags.bits).map_err( let link_fd = bpf_link_create(prog_fd, if_index, BPF_XDP, None, flags.bits()).map_err(
|(_, io_error)| ProgramError::SyscallError { |(_, io_error)| ProgramError::SyscallError {
call: "bpf_link_create".to_owned(), call: "bpf_link_create".to_owned(),
io_error, io_error,
@ -138,7 +138,7 @@ impl Xdp {
.links .links
.insert(XdpLink::new(XdpLinkInner::FdLink(FdLink::new(link_fd)))) .insert(XdpLink::new(XdpLinkInner::FdLink(FdLink::new(link_fd))))
} else { } else {
unsafe { netlink_set_xdp_fd(if_index, prog_fd, None, flags.bits) } unsafe { netlink_set_xdp_fd(if_index, prog_fd, None, flags.bits()) }
.map_err(|io_error| XdpError::NetlinkError { io_error })?; .map_err(|io_error| XdpError::NetlinkError { io_error })?;
self.data self.data
@ -226,9 +226,9 @@ impl Link for NlLink {
fn detach(self) -> Result<(), ProgramError> { fn detach(self) -> Result<(), ProgramError> {
let k_ver = kernel_version().unwrap(); let k_ver = kernel_version().unwrap();
let flags = if k_ver >= (5, 7, 0) { let flags = if k_ver >= (5, 7, 0) {
self.flags.bits | XDP_FLAGS_REPLACE self.flags.bits() | XDP_FLAGS_REPLACE
} else { } else {
self.flags.bits self.flags.bits()
}; };
let _ = unsafe { netlink_set_xdp_fd(self.if_index, -1, Some(self.prog_fd), flags) }; let _ = unsafe { netlink_set_xdp_fd(self.if_index, -1, Some(self.prog_fd), flags) };
Ok(()) Ok(())
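End to end, the attach path above is typically driven like this from user space; the object path, program name, and interface are assumptions, and with bitflags 2.x the raw flag value is read through `bits()` rather than the former public `bits` field:

use aya::{include_bytes_aligned, programs::{Xdp, XdpFlags}, Bpf};

fn attach_xdp() -> Result<(), anyhow::Error> {
    let mut bpf = Bpf::load(include_bytes_aligned!(
        "../../target/bpfel-unknown-none/release/pass" // assumed object path
    ))?;
    let prog: &mut Xdp = bpf.program_mut("pass").unwrap().try_into()?;
    prog.load()?;
    // Kernels >= 5.9 go through bpf_link_create; older ones fall back to netlink.
    prog.attach("eth0", XdpFlags::SKB_MODE)?;
    Ok(())
}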

@ -8,13 +8,17 @@ use std::{
}; };
use libc::{c_char, c_long, close, ENOENT, ENOSPC}; use libc::{c_char, c_long, close, ENOENT, ENOSPC};
use obj::{
maps::{bpf_map_def, LegacyMap},
BpfSectionKind,
};
use crate::{ use crate::{
generated::{ generated::{
bpf_attach_type, bpf_attr, bpf_btf_info, bpf_cmd, bpf_insn, bpf_link_info, bpf_map_info, bpf_attach_type, bpf_attr, bpf_btf_info, bpf_cmd, bpf_insn, bpf_link_info, bpf_map_info,
bpf_map_type, bpf_prog_info, bpf_prog_type, BPF_F_REPLACE, bpf_map_type, bpf_prog_info, bpf_prog_type, BPF_F_REPLACE,
}, },
maps::PerCpuValues, maps::{MapData, PerCpuValues},
obj::{ obj::{
self, self,
btf::{ btf::{
@ -66,7 +70,7 @@ pub(crate) fn bpf_create_map(name: &CStr, def: &obj::Map, btf_fd: Option<RawFd>)
_ => { _ => {
u.btf_key_type_id = m.def.btf_key_type_id; u.btf_key_type_id = m.def.btf_key_type_id;
u.btf_value_type_id = m.def.btf_value_type_id; u.btf_value_type_id = m.def.btf_value_type_id;
u.btf_fd = btf_fd.unwrap() as u32; u.btf_fd = btf_fd.unwrap_or_default() as u32;
} }
} }
} }
@ -599,6 +603,37 @@ pub(crate) fn is_prog_name_supported() -> bool {
} }
} }
pub(crate) fn is_probe_read_kernel_supported() -> bool {
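// Try to load a minimal TRACEPOINT program that calls helper 113
// (bpf_probe_read_kernel); if the kernel accepts the load, the helper is available.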
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_3 };
let prog: &[u8] = &[
0xbf, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = r10
0x07, 0x01, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, // r1 -= 8
0xb7, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // r2 = 8
0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, // call 113
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
];
let gpl = b"GPL\0";
u.license = gpl.as_ptr() as u64;
let insns = copy_instructions(prog).unwrap();
u.insn_cnt = insns.len() as u32;
u.insns = insns.as_ptr() as u64;
u.prog_type = bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT as u32;
match sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) {
Ok(v) => {
let fd = v as RawFd;
unsafe { close(fd) };
true
}
Err(_) => false,
}
}
pub(crate) fn is_perf_link_supported() -> bool { pub(crate) fn is_perf_link_supported() -> bool {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() }; let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_3 }; let u = unsafe { &mut attr.__bindgen_anon_3 };
@ -630,6 +665,60 @@ pub(crate) fn is_perf_link_supported() -> bool {
false false
} }
pub(crate) fn is_bpf_global_data_supported() -> bool {
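// Create a single-entry array map and try to load a socket filter whose first
// instruction references it as a pseudo map value (src_reg = 2 in the ld_imm64 below);
// kernels without BPF global data support reject that program.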
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_3 };
let prog: &[u8] = &[
0x18, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ld_pseudo r1, 0x2, 0x0
0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, //
0x7a, 0x01, 0x00, 0x00, 0x2a, 0x00, 0x00, 0x00, // stdw [r1 + 0x0], 0x2a
0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mov64 r0 = 0
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
];
let mut insns = copy_instructions(prog).unwrap();
let mut map_data = MapData {
obj: obj::Map::Legacy(LegacyMap {
def: bpf_map_def {
map_type: bpf_map_type::BPF_MAP_TYPE_ARRAY as u32,
key_size: 4,
value_size: 32,
max_entries: 1,
..Default::default()
},
section_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
}),
fd: None,
pinned: false,
btf_fd: None,
};
if let Ok(map_fd) = map_data.create("aya_global") {
insns[0].imm = map_fd;
let gpl = b"GPL\0";
u.license = gpl.as_ptr() as u64;
u.insn_cnt = insns.len() as u32;
u.insns = insns.as_ptr() as u64;
u.prog_type = bpf_prog_type::BPF_PROG_TYPE_SOCKET_FILTER as u32;
if let Ok(v) = sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) {
let fd = v as RawFd;
unsafe { close(fd) };
return true;
}
}
false
}
pub(crate) fn is_btf_supported() -> bool { pub(crate) fn is_btf_supported() -> bool {
let mut btf = Btf::new(); let mut btf = Btf::new();
let name_offset = btf.add_string("int".to_string()); let name_offset = btf.add_string("int".to_string());

@ -8,6 +8,8 @@ mod fake;
use std::io; use std::io;
#[cfg(not(test))] #[cfg(not(test))]
use std::{ffi::CString, mem}; use std::{ffi::CString, mem};
#[cfg(not(test))]
use std::{fs::File, io::Read};
#[cfg(not(test))] #[cfg(not(test))]
use libc::utsname; use libc::utsname;
@ -82,8 +84,40 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
Ok((0xff, 0xff, 0xff)) Ok((0xff, 0xff, 0xff))
} }
#[cfg(not(test))]
fn ubuntu_kernel_version() -> Result<(u32, u32, u32), ()> {
if let Ok(mut file) = File::open("/proc/version_signature") {
let mut buf = String::new();
let mut major = 0u32;
let mut minor = 0u32;
let mut patch = 0u32;
let format = CString::new("%*s %*s %u.%u.%u\n").unwrap();
file.read_to_string(&mut buf).map_err(|_| ())?;
unsafe {
if libc::sscanf(
buf.as_ptr() as *const _,
format.as_ptr(),
&mut major as *mut u32,
&mut minor as *mut _,
&mut patch as *mut _,
) == 3
{
return Ok((major, minor, patch));
}
}
}
Err(())
}
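For reference, the `%*s %*s %u.%u.%u` parse above could also be written without `libc::sscanf`; a hypothetical pure-Rust equivalent (not part of this change) might be:

// /proc/version_signature looks like "Ubuntu 5.15.0-25.25-generic 5.15.30"; the last
// whitespace-separated token is the upstream kernel version.
fn parse_version_signature(buf: &str) -> Option<(u32, u32, u32)> {
    let mut parts = buf.split_whitespace().last()?.split('.');
    let major = parts.next()?.parse().ok()?;
    let minor = parts.next()?.parse().ok()?;
    let patch = parts.next()?.parse().ok()?;
    Some((major, minor, patch))
}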
#[cfg(not(test))] #[cfg(not(test))]
pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
if let Ok(version) = ubuntu_kernel_version() {
return Ok(version);
}
unsafe { unsafe {
let mut v = mem::zeroed::<utsname>(); let mut v = mem::zeroed::<utsname>();
if libc::uname(&mut v as *mut _) != 0 { if libc::uname(&mut v as *mut _) != 0 {
@ -93,6 +127,33 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
let mut major = 0u32; let mut major = 0u32;
let mut minor = 0u32; let mut minor = 0u32;
let mut patch = 0u32; let mut patch = 0u32;
let debian_marker = CString::new("Debian").unwrap();
let p = libc::strstr(v.version.as_ptr(), debian_marker.as_ptr());
if !p.is_null() {
let debian_format = CString::new("Debian %u.%u.%u").map_err(|_| ())?;
if libc::sscanf(
p,
debian_format.as_ptr(),
&mut major as *mut u32,
&mut minor as *mut _,
&mut patch as *mut _,
) == 3
{
// On Debian 10, kernels after 4.19.229 are effectively versioned as 4.19.255 due to broken Makefile patches, so clamp the parsed patch level to match. // On Debian 10, kernels after 4.19.229 are effectively versioned as 4.19.255 due to broken Makefile patches, so clamp the parsed patch level to match.
let patch_level_limit = if major == 4 && minor == 19 { 230 } else { 255 };
if patch >= patch_level_limit {
patch = 255;
}
return Ok((major, minor, patch));
}
}
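// e.g. a Debian 10 kernel parsed as 4.19.232 is returned as (4, 19, 255) here, while
// kernels that do not match the Debian marker fall through to the plain "%u.%u.%u" parse below.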
let format = CString::new("%u.%u.%u").unwrap(); let format = CString::new("%u.%u.%u").unwrap();
if libc::sscanf( if libc::sscanf(
v.release.as_ptr(), v.release.as_ptr(),

@ -6,8 +6,8 @@ use core::{
use aya_bpf_bindings::helpers::{ use aya_bpf_bindings::helpers::{
bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace, bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace,
bpf_skb_adjust_room, bpf_skb_change_type, bpf_skb_load_bytes, bpf_skb_pull_data, bpf_skb_adjust_room, bpf_skb_change_proto, bpf_skb_change_type, bpf_skb_load_bytes,
bpf_skb_store_bytes, bpf_skb_pull_data, bpf_skb_store_bytes,
}; };
use aya_bpf_cty::c_long; use aya_bpf_cty::c_long;
@ -189,6 +189,16 @@ impl SkBuff {
} }
} }
#[inline]
pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_proto(self.as_ptr() as *mut _, proto, flags) };
if ret == 0 {
Ok(())
} else {
Err(ret)
}
}
#[inline] #[inline]
pub fn change_type(&self, ty: u32) -> Result<(), c_long> { pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) }; let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) };

@ -142,6 +142,11 @@ impl TcContext {
self.skb.clone_redirect(if_index, flags) self.skb.clone_redirect(if_index, flags)
} }
#[inline]
pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> {
self.skb.change_proto(proto, flags)
}
#[inline] #[inline]
pub fn change_type(&self, ty: u32) -> Result<(), c_long> { pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
self.skb.change_type(ty) self.skb.change_type(ty)
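A usage sketch for the new `change_proto` wrapper; the EtherType constant and byte-order handling are assumptions, since `bpf_skb_change_proto` expects the protocol in network byte order:

use aya_bpf::programs::TcContext;
use aya_bpf_cty::c_long;

// Hypothetical helper called from an existing classifier program.
fn switch_to_ipv6(ctx: &TcContext) -> Result<(), c_long> {
    const ETH_P_IPV6: u16 = 0x86DD;
    ctx.change_proto(ETH_P_IPV6.to_be(), 0)
}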

@ -6,6 +6,11 @@ publish = false
[dependencies] [dependencies]
aya-bpf = { path = "../../bpf/aya-bpf" } aya-bpf = { path = "../../bpf/aya-bpf" }
aya-log-ebpf = { path = "../../bpf/aya-log-ebpf" }
[[bin]]
name = "log"
path = "src/log.rs"
[[bin]] [[bin]]
name = "map_test" name = "map_test"

@ -0,0 +1,27 @@
#![no_std]
#![no_main]
use aya_bpf::{macros::uprobe, programs::ProbeContext};
use aya_log_ebpf::{debug, error, info, trace, warn};
#[uprobe]
pub fn test_log(ctx: ProbeContext) {
debug!(&ctx, "Hello from eBPF!");
error!(&ctx, "{}, {}, {}", 69, 420i32, "wao");
let ipv4 = 167772161u32; // 10.0.0.1
let ipv6 = [
32u8, 1u8, 13u8, 184u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 1u8,
]; // 2001:db8::1
info!(&ctx, "ipv4: {:ipv4}, ipv6: {:ipv6}", ipv4, ipv6);
let mac = [4u8, 32u8, 6u8, 9u8, 0u8, 64u8];
trace!(&ctx, "mac lc: {:mac}, mac uc: {:MAC}", mac, mac);
let hex = 0x2f;
warn!(&ctx, "hex lc: {:x}, hex uc: {:X}", hex, hex);
let hex = [0xde, 0xad, 0xbe, 0xef].as_slice();
debug!(&ctx, "hex lc: {:x}, hex uc: {:X}", hex, hex);
}
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe { core::hint::unreachable_unchecked() }
}

@ -6,7 +6,8 @@ publish = false
[dependencies] [dependencies]
quote = "1" quote = "1"
syn = {version = "1.0", features = ["full"]} proc-macro2 = "1.0"
syn = {version = "2.0", features = ["full"]}
[lib] [lib]
proc-macro = true proc-macro = true

@ -1,6 +1,7 @@
use proc_macro::TokenStream; use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote; use quote::quote;
use syn::{parse_macro_input, ItemFn}; use syn::{parse_macro_input, Ident, ItemFn};
#[proc_macro_attribute] #[proc_macro_attribute]
pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream { pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
@ -17,3 +18,29 @@ pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
}; };
TokenStream::from(expanded) TokenStream::from(expanded)
} }
#[proc_macro_attribute]
pub fn tokio_integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
let item = parse_macro_input!(item as ItemFn);
let name = &item.sig.ident;
let name_str = &item.sig.ident.to_string();
let sync_name_str = format!("sync_{name_str}");
let sync_name = Ident::new(&sync_name_str, Span::call_site());
let expanded = quote! {
#item
fn #sync_name() {
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
rt.block_on(#name());
}
inventory::submit!(crate::IntegrationTest {
name: concat!(module_path!(), "::", #sync_name_str),
test_fn: #sync_name,
});
};
TokenStream::from(expanded)
}

@ -7,16 +7,19 @@ publish = false
[dependencies] [dependencies]
anyhow = "1" anyhow = "1"
aya = { path = "../../aya" } aya = { path = "../../aya" }
aya-log = { path = "../../aya-log" }
aya-obj = { path = "../../aya-obj" } aya-obj = { path = "../../aya-obj" }
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
env_logger = "0.10" env_logger = "0.10"
futures-core = "0.3"
inventory = "0.3" inventory = "0.3"
integration-test-macros = { path = "../integration-test-macros" } integration-test-macros = { path = "../integration-test-macros" }
lazy_static = "1" lazy_static = "1"
libc = { version = "0.2.105" } libc = { version = "0.2.105" }
log = "0.4" log = "0.4"
object = { version = "0.30", default-features = false, features = ["std", "read_core", "elf"] } object = { version = "0.31", default-features = false, features = ["std", "read_core", "elf"] }
rbpf = "0.1.0" rbpf = "0.1.0"
regex = "1" regex = "1"
tempfile = "3.3.0" tempfile = "3.3.0"
libtest-mimic = "0.6.0" libtest-mimic = "0.6.0"
tokio = { version = "1.24", features = ["rt", "rt-multi-thread", "sync", "time"] }

@ -0,0 +1,140 @@
use std::sync::{Arc, LockResult, Mutex, MutexGuard};
use aya::{include_bytes_aligned, programs::UProbe, Bpf};
use aya_log::BpfLogger;
use log::{Level, Log, Record};
use tokio::time::{sleep, Duration};
use super::tokio_integration_test;
const MAX_ATTEMPTS: usize = 10;
const TIMEOUT_MS: u64 = 10;
#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_ebpf_program() {}
struct CapturedLogs(Arc<Mutex<Vec<CapturedLog>>>);
impl CapturedLogs {
fn with_capacity(capacity: usize) -> Self {
Self(Arc::new(Mutex::new(Vec::with_capacity(capacity))))
}
fn clone(&self) -> Self {
Self(self.0.clone())
}
fn lock(&self) -> LockResult<MutexGuard<'_, Vec<CapturedLog>>> {
self.0.lock()
}
async fn wait_expected_len(&self, expected_len: usize) {
for _ in 0..MAX_ATTEMPTS {
{
let captured_logs = self.0.lock().expect("Failed to lock captured logs");
if captured_logs.len() == expected_len {
return;
}
}
sleep(Duration::from_millis(TIMEOUT_MS)).await;
}
panic!(
"Expected {} captured logs, but got {}",
expected_len,
self.0.lock().unwrap().len()
);
}
}
struct CapturedLog {
pub body: String,
pub level: Level,
pub target: String,
}
struct TestingLogger {
captured_logs: CapturedLogs,
}
impl TestingLogger {
pub fn with_capacity(capacity: usize) -> (Self, CapturedLogs) {
let captured_logs = CapturedLogs::with_capacity(capacity);
(
Self {
captured_logs: captured_logs.clone(),
},
captured_logs,
)
}
}
impl Log for TestingLogger {
fn enabled(&self, _metadata: &log::Metadata) -> bool {
true
}
fn flush(&self) {}
fn log(&self, record: &Record) {
let captured_record = CapturedLog {
body: format!("{}", record.args()),
level: record.level(),
target: record.target().to_string(),
};
self.captured_logs
.lock()
.expect("Failed to acquire a lock for storing a log")
.push(captured_record);
}
}
#[tokio_integration_test]
async fn log() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/log");
let mut bpf = Bpf::load(bytes).unwrap();
let (logger, captured_logs) = TestingLogger::with_capacity(5);
BpfLogger::init_with_logger(&mut bpf, logger).unwrap();
let prog: &mut UProbe = bpf.program_mut("test_log").unwrap().try_into().unwrap();
prog.load().unwrap();
prog.attach(Some("trigger_ebpf_program"), 0, "/proc/self/exe", None)
.unwrap();
// Call the function that the uprobe is attached to, so it starts logging.
trigger_ebpf_program();
captured_logs.wait_expected_len(6).await;
let records = captured_logs
.lock()
.expect("Failed to acquire a lock for reading logs");
assert_eq!(records.len(), 6);
assert_eq!(records[0].body, "Hello from eBPF!");
assert_eq!(records[0].level, Level::Debug);
assert_eq!(records[0].target, "log");
assert_eq!(records[1].body, "69, 420, wao");
assert_eq!(records[1].level, Level::Error);
assert_eq!(records[1].target, "log");
assert_eq!(records[2].body, "ipv4: 10.0.0.1, ipv6: 2001:db8::1");
assert_eq!(records[2].level, Level::Info);
assert_eq!(records[2].target, "log");
assert_eq!(
records[3].body,
"mac lc: 04:20:06:09:00:40, mac uc: 04:20:06:09:00:40"
);
assert_eq!(records[3].level, Level::Trace);
assert_eq!(records[3].target, "log");
assert_eq!(records[4].body, "hex lc: 2f, hex uc: 2F");
assert_eq!(records[4].level, Level::Warn);
assert_eq!(records[4].target, "log");
assert_eq!(records[5].body, "hex lc: deadbeef, hex uc: DEADBEEF");
assert_eq!(records[5].level, Level::Debug);
assert_eq!(records[5].target, "log");
}

@ -7,11 +7,13 @@ use std::{ffi::CStr, mem};
pub mod btf_relocations; pub mod btf_relocations;
pub mod elf; pub mod elf;
pub mod load; pub mod load;
pub mod log;
pub mod rbpf; pub mod rbpf;
pub mod relocations; pub mod relocations;
pub mod smoke; pub mod smoke;
pub use integration_test_macros::integration_test; pub use integration_test_macros::{integration_test, tokio_integration_test};
#[derive(Debug)] #[derive(Debug)]
pub struct IntegrationTest { pub struct IntegrationTest {
pub name: &'static str, pub name: &'static str,

@ -197,7 +197,7 @@ EOF
exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \ exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \
-y --profile minimal --default-toolchain nightly --component rust-src --component clippy' -y --profile minimal --default-toolchain nightly --component rust-src --component clippy'
exec_vm 'echo source ~/.cargo/env >> ~/.bashrc' exec_vm 'echo source ~/.cargo/env >> ~/.bashrc'
exec_vm cargo install bpf-linker --no-default-features --features system-llvm exec_vm cargo install bpf-linker --no-default-features
} }
scp_vm() { scp_vm() {

@ -8,7 +8,7 @@ edition = "2021"
aya-tool = { path = "../aya-tool" } aya-tool = { path = "../aya-tool" }
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
anyhow = "1" anyhow = "1"
syn = "1" syn = "2"
quote = "1" quote = "1"
proc-macro2 = "1" proc-macro2 = "1"
indoc = "2.0" indoc = "2.0"

@ -46,6 +46,7 @@ pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> {
"sk_action", "sk_action",
"pt_regs", "pt_regs",
"user_pt_regs", "user_pt_regs",
"user_regs_struct",
"xdp_action", "xdp_action",
]; ];
let vars = ["BPF_.*", "bpf_.*", "TC_ACT_.*", "SOL_SOCKET", "SO_.*"]; let vars = ["BPF_.*", "bpf_.*", "TC_ACT_.*", "SOL_SOCKET", "SO_.*"];
