lint all crates; enable strict pointer lints

reviewable/pr1354/r2
Tamir Duberstein 2 weeks ago
parent ec3eacc1d8
commit 5f5305c2a8

@ -110,7 +110,51 @@ xdpilone = { version = "1.0.5", default-features = false }
xz2 = { version = "0.1.7", default-features = false }
[workspace.lints.clippy]
all = "warn"
as_ptr_cast_mut = "warn"
#as_underscore = "warn"
cast_lossless = "warn"
#cast_possible_truncation = "warn"
#cast_possible_wrap = "warn"
cast_precision_loss = "warn"
#cast_sign_loss = "warn"
char_lit_as_u8 = "warn"
fn_to_numeric_cast = "warn"
fn_to_numeric_cast_with_truncation = "warn"
mut_mut = "warn"
needless_bitwise_bool = "warn"
needless_lifetimes = "warn"
no_mangle_with_rust_abi = "warn"
ptr_as_ptr = "warn"
ptr_cast_constness = "warn"
ref_as_ptr = "warn"
unnecessary_cast = "warn"
unused_trait_names = "warn"
use_self = "warn"
[workspace.lints.rust]
unused-extern-crates = "warn"
absolute_paths_not_starting_with_crate = "warn"
deprecated_in_future = "warn"
elided_lifetimes_in_paths = "warn"
explicit_outlives_requirements = "warn"
ffi_unwind_calls = "warn"
keyword_idents = "warn"
#let_underscore_drop = "warn"
macro_use_extern_crate = "warn"
meta_variable_misuse = "warn"
missing_abi = "warn"
#missing_copy_implementations = "warn"
non_ascii_idents = "warn"
noop_method_call = "warn"
single_use_lifetimes = "warn"
trivial_numeric_casts = "warn"
unreachable_pub = "warn"
#unsafe_op_in_unsafe_fn = "warn"
unstable_features = "warn"
unused_crate_dependencies = "warn"
unused_extern_crates = "warn"
unused_import_braces = "warn"
unused_lifetimes = "warn"
unused_macro_rules = "warn"
#unused_qualifications = "warn" # https://github.com/rust-lang/rust/commit/9ccc7b7 added size_of to the prelude, but we need to continue to qualify it so that we build on older compilers.
#unused_results = "warn"
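
The pointer lints enabled above drive most of the mechanical changes in this diff. A minimal sketch of the before/after patterns they enforce (function and variable names are hypothetical):

```rust
use std::ptr;

fn pointer_lint_examples(bytes: &[u8], value: &mut u32) {
    // clippy::ptr_as_ptr: was `bytes.as_ptr() as *const u32`
    let _typed: *const u32 = bytes.as_ptr().cast();
    // clippy::ptr_cast_constness: was `bytes.as_ptr() as *mut u8`
    let _mutable: *mut u8 = bytes.as_ptr().cast_mut();
    // clippy::ref_as_ptr: was `value as *mut u32`
    let _from_ref: *mut u32 = ptr::from_mut(value);
}
```

Each replacement changes exactly one property per step (pointee type, constness, or reference-to-pointer), where a bare `as` cast can silently change several at once.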

@ -22,7 +22,10 @@ use cargo_metadata::{Artifact, CompilerMessage, Message, Package, Target};
/// prevent their use for the time being.
///
/// [bindeps]: https://doc.rust-lang.org/nightly/cargo/reference/unstable.html?highlight=feature#artifact-dependencies
pub fn build_ebpf(packages: impl IntoIterator<Item = Package>, toolchain: Toolchain) -> Result<()> {
pub fn build_ebpf(
packages: impl IntoIterator<Item = Package>,
toolchain: Toolchain<'_>,
) -> Result<()> {
let out_dir = env::var_os("OUT_DIR").ok_or(anyhow!("OUT_DIR not set"))?;
let out_dir = PathBuf::from(out_dir);
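
The `Toolchain<'_>` change (and the many `ParseStream<'_>`/`Section<'_>` edits below) come from rustc's `elided_lifetimes_in_paths` lint: a type that borrows must show that at the use site, even when the lifetime itself is inferred. A reduced sketch with a stand-in type:

```rust
// Stand-in type; the real Toolchain lives in aya's xtask crate.
struct Toolchain<'a> {
    name: &'a str,
}

// was: fn describe(tc: Toolchain) -> usize
fn describe(tc: Toolchain<'_>) -> usize {
    tc.name.len()
}
```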

@ -19,7 +19,7 @@ pub(crate) struct Args {
}
impl Parse for Args {
fn parse(input: ParseStream) -> Result<Args> {
fn parse(input: ParseStream<'_>) -> Result<Self> {
let args = Punctuated::<Arg, Token![,]>::parse_terminated_with(input, |input| {
let ident = input.parse::<Ident>()?;
let lookahead = input.lookahead1();
@ -42,7 +42,7 @@ impl Parse for Args {
})
.collect();
Ok(Args { args })
Ok(Self { args })
}
}
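
`clippy::use_self`, enabled above, accounts for the `Args { args }` to `Self { args }` rewrites here and throughout the diff. A minimal sketch of the benefit: renaming the type then touches one line instead of every constructor in the impl block.

```rust
struct Args {
    args: Vec<String>,
}

impl Args {
    fn empty() -> Self {
        // was: Args { args: Vec::new() }
        Self { args: Vec::new() }
    }
}
```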

@ -12,11 +12,11 @@ pub(crate) struct BtfMap {
}
impl BtfMap {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<BtfMap> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item: ItemStatic = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let name = name_arg(&mut args).unwrap_or_else(|| item.ident.to_string());
Ok(BtfMap { item, name })
Ok(Self { item, name })
}
pub(crate) fn expand(&self) -> TokenStream {

@ -11,10 +11,7 @@ pub(crate) struct CgroupSockopt {
}
impl CgroupSockopt {
pub(crate) fn parse(
attrs: TokenStream,
item: TokenStream,
) -> Result<CgroupSockopt, Diagnostic> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self, Diagnostic> {
if attrs.is_empty() {
return Err(attrs.span().error("missing attach type"));
}

@ -13,7 +13,7 @@ impl FlowDissector {
return Err(attrs.span().error("unexpected attribute"));
}
let item = syn::parse2(item)?;
Ok(FlowDissector { item })
Ok(Self { item })
}
pub(crate) fn expand(&self) -> TokenStream {

@ -1,3 +1,5 @@
#![cfg_attr(test, expect(unused_crate_dependencies, reason = "used in doctests"))]
pub(crate) mod args;
mod btf_map;
mod btf_tracepoint;

@ -11,7 +11,7 @@ pub(crate) struct Map {
}
impl Map {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Map> {
pub(crate) fn parse(attrs: TokenStream, item: TokenStream) -> Result<Self> {
let item: ItemStatic = syn::parse2(item)?;
let mut args = syn::parse2(attrs)?;
let name = name_arg(&mut args).unwrap_or_else(|| item.ident.to_string());

@ -207,8 +207,8 @@ where
{
fn as_ref(&self) -> &[u8] {
match self {
Either::Left(l) => l.as_ref(),
Either::Right(r) => r.as_ref(),
Self::Left(l) => l.as_ref(),
Self::Right(r) => r.as_ref(),
}
}
}
@ -217,11 +217,11 @@ impl sealed::Sealed for IpAddr {}
impl Argument for IpAddr {
fn as_argument(&self) -> (ArgumentKind, impl AsRef<[u8]>) {
match self {
IpAddr::V4(ipv4_addr) => {
Self::V4(ipv4_addr) => {
let (kind, value) = ipv4_addr.as_argument();
(kind, Either::Left(value))
}
IpAddr::V6(ipv6_addr) => {
Self::V6(ipv6_addr) => {
let (kind, value) = ipv6_addr.as_argument();
(kind, Either::Right(value))
}

@ -21,7 +21,7 @@ mod kw {
}
impl Parse for LogArgs {
fn parse(input: ParseStream) -> Result<Self> {
fn parse(input: ParseStream<'_>) -> Result<Self> {
let ctx: Expr = input.parse()?;
input.parse::<Token![,]>()?;

@ -56,7 +56,9 @@
//! [env_logger]: https://docs.rs/env_logger
//! [Log]: https://docs.rs/log/0.4.14/log/trait.Log.html
//! [log]: https://docs.rs/log
//!
#![cfg_attr(test, expect(unused_crate_dependencies, reason = "used in doctests"))]
use std::{
fmt::{LowerHex, UpperHex},
mem,
@ -174,7 +176,7 @@ impl<T: Log> EbpfLogger<T> {
fn new(map: Map, logger: T) -> Result<Self, Error> {
let ring_buf: RingBuf<_> = map.try_into()?;
Ok(EbpfLogger { ring_buf, logger })
Ok(Self { ring_buf, logger })
}
/// Reads log records from eBPF and writes them to the logger.
@ -495,7 +497,7 @@ fn log_buf<T: ?Sized + Log>(mut buf: &[u8], logger: &T) -> Result<(), ()> {
}
RecordFieldKind::Level => {
let level = level.replace({
let level = unsafe { ptr::read_unaligned(value.as_ptr() as *const _) };
let level = unsafe { ptr::read_unaligned(value.as_ptr().cast()) };
match level {
Level::Error => log::Level::Error,
Level::Warn => log::Level::Warn,
@ -557,7 +559,7 @@ fn log_buf<T: ?Sized + Log>(mut buf: &[u8], logger: &T) -> Result<(), ()> {
match tag {
ArgumentKind::DisplayHint => {
last_hint = Some(unsafe { ptr::read_unaligned(value.as_ptr() as *const _) });
last_hint = Some(unsafe { ptr::read_unaligned(value.as_ptr().cast()) });
}
ArgumentKind::I8 => {
full_log_msg.push_str(
@ -717,7 +719,7 @@ fn log_buf<T: ?Sized + Log>(mut buf: &[u8], logger: &T) -> Result<(), ()> {
.map_err(|std::array::TryFromSliceError { .. }| ())?;
let mut value: [u16; 8] = Default::default();
for (i, s) in data.chunks_exact(2).enumerate() {
value[i] = ((s[1] as u16) << 8) | s[0] as u16;
value[i] = (u16::from(s[1]) << 8) | u16::from(s[0]);
}
full_log_msg.push_str(&value.format(last_hint.take())?);
}
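
The `u16::from` rewrite above is `clippy::cast_lossless`: `From` only exists for widening conversions, so the code stays infallible even if the operand types drift, while `as u16` would also compile for a lossy narrowing. The same byte-pair assembly, reduced:

```rust
// Little-endian pair to u16, as in the chunk loop above.
fn u16_le(pair: [u8; 2]) -> u16 {
    let [lo, hi] = pair;
    // was: ((hi as u16) << 8) | lo as u16
    (u16::from(hi) << 8) | u16::from(lo)
}
```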
@ -754,7 +756,7 @@ fn try_read<T: Pod>(mut buf: &[u8]) -> Result<(T, &[u8], &[u8]), ()> {
return Err(());
}
let tag = unsafe { ptr::read_unaligned(buf.as_ptr() as *const T) };
let tag = unsafe { ptr::read_unaligned(buf.as_ptr().cast::<T>()) };
buf = &buf[mem::size_of::<T>()..];
let len =

@ -187,7 +187,7 @@ impl BtfFeatures {
btf_type_tag: bool,
btf_enum64: bool,
) -> Self {
BtfFeatures {
Self {
btf_func,
btf_func_global,
btf_datasec,
@ -257,8 +257,8 @@ pub struct Btf {
impl Btf {
/// Creates a new empty instance with its header initialized
pub fn new() -> Btf {
Btf {
pub fn new() -> Self {
Self {
header: btf_header {
magic: 0xeb9f,
version: 0x01,
@ -305,8 +305,8 @@ impl Btf {
/// Loads BTF metadata from `/sys/kernel/btf/vmlinux`.
#[cfg(feature = "std")]
pub fn from_sys_fs() -> Result<Btf, BtfError> {
Btf::parse_file("/sys/kernel/btf/vmlinux", Endianness::default())
pub fn from_sys_fs() -> Result<Self, BtfError> {
Self::parse_file("/sys/kernel/btf/vmlinux", Endianness::default())
}
/// Loads BTF metadata from the given `path`.
@ -314,10 +314,10 @@ impl Btf {
pub fn parse_file<P: AsRef<std::path::Path>>(
path: P,
endianness: Endianness,
) -> Result<Btf, BtfError> {
) -> Result<Self, BtfError> {
use std::{borrow::ToOwned as _, fs};
let path = path.as_ref();
Btf::parse(
Self::parse(
&fs::read(path).map_err(|error| BtfError::FileError {
path: path.to_owned(),
error,
@ -327,7 +327,7 @@ impl Btf {
}
/// Parses BTF from binary data of the given endianness
pub fn parse(data: &[u8], endianness: Endianness) -> Result<Btf, BtfError> {
pub fn parse(data: &[u8], endianness: Endianness) -> Result<Self, BtfError> {
if data.len() < mem::size_of::<btf_header>() {
return Err(BtfError::InvalidHeader);
}
@ -342,9 +342,9 @@ impl Btf {
}
let strings = data[str_off..str_off + str_len].to_vec();
let types = Btf::read_type_info(&header, data, endianness)?;
let types = Self::read_type_info(&header, data, endianness)?;
Ok(Btf {
Ok(Self {
header,
strings,
types,
@ -766,11 +766,7 @@ pub struct BtfExt {
}
impl BtfExt {
pub(crate) fn parse(
data: &[u8],
endianness: Endianness,
btf: &Btf,
) -> Result<BtfExt, BtfError> {
pub(crate) fn parse(data: &[u8], endianness: Endianness, btf: &Btf) -> Result<Self, BtfError> {
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct MinimalHeader {
@ -788,7 +784,7 @@ impl BtfExt {
// first find the actual size of the header by converting into the minimal valid header
// Safety: MinimalHeader is POD so read_unaligned is safe
let minimal_header = unsafe {
ptr::read_unaligned::<MinimalHeader>(data.as_ptr() as *const MinimalHeader)
ptr::read_unaligned::<MinimalHeader>(data.as_ptr().cast::<MinimalHeader>())
};
let len_to_read = minimal_header.hdr_len as usize;
@ -812,7 +808,7 @@ impl BtfExt {
// data.len(). Additionally, we know that the header has
// been initialized so it's safe to call for assume_init.
unsafe {
core::ptr::copy(data.as_ptr(), header.as_mut_ptr() as *mut u8, len_to_read);
core::ptr::copy(data.as_ptr(), header.as_mut_ptr().cast::<u8>(), len_to_read);
header.assume_init()
}
};
@ -851,7 +847,7 @@ impl BtfExt {
})
};
let mut ext = BtfExt {
let mut ext = Self {
header,
relocations: Vec::new(),
func_info: FuncInfo::new(),

@ -41,7 +41,7 @@ impl FuncSecInfo {
rec_size: usize,
func_info_data: &[u8],
endianness: Endianness,
) -> FuncSecInfo {
) -> Self {
let func_info = func_info_data
.chunks(rec_size)
.map(|data| {
@ -65,7 +65,7 @@ impl FuncSecInfo {
})
.collect();
FuncSecInfo {
Self {
_sec_name_offset: sec_name_offset,
num_info,
func_info,
@ -101,8 +101,8 @@ pub struct FuncInfo {
}
impl FuncInfo {
pub(crate) fn new() -> FuncInfo {
FuncInfo {
pub(crate) fn new() -> Self {
Self {
data: HashMap::new(),
}
}
@ -138,7 +138,7 @@ impl LineSecInfo {
rec_size: usize,
func_info_data: &[u8],
endianness: Endianness,
) -> LineSecInfo {
) -> Self {
let line_info = func_info_data
.chunks(rec_size)
.map(|data| {
@ -171,7 +171,7 @@ impl LineSecInfo {
})
.collect();
LineSecInfo {
Self {
_sec_name_offset: sec_name_offset,
num_info,
line_info,
@ -201,8 +201,8 @@ pub(crate) struct LineInfo {
}
impl LineInfo {
pub(crate) fn new() -> LineInfo {
LineInfo {
pub(crate) fn new() -> Self {
Self {
data: HashMap::new(),
}
}

@ -200,14 +200,14 @@ pub(crate) struct Relocation {
}
impl Relocation {
pub(crate) unsafe fn parse(data: &[u8], number: usize) -> Result<Relocation, BtfError> {
pub(crate) unsafe fn parse(data: &[u8], number: usize) -> Result<Self, BtfError> {
if mem::size_of::<bpf_core_relo>() > data.len() {
return Err(BtfError::InvalidRelocationInfo);
}
let rel = unsafe { ptr::read_unaligned::<bpf_core_relo>(data.as_ptr() as *const _) };
let rel = unsafe { ptr::read_unaligned::<bpf_core_relo>(data.as_ptr().cast()) };
Ok(Relocation {
Ok(Self {
kind: rel.kind.try_into()?,
ins_offset: rel.insn_off as usize,
type_id: rel.type_id,
@ -225,7 +225,7 @@ impl Object {
_ => return Ok(()),
};
let mut candidates_cache = HashMap::<u32, Vec<Candidate>>::new();
let mut candidates_cache = HashMap::<u32, Vec<Candidate<'_>>>::new();
for (sec_name_off, relos) in btf_ext.relocations() {
let section_name =
local_btf
@ -430,8 +430,8 @@ fn find_candidates<'target>(
}
fn match_candidate<'target>(
local_spec: &AccessSpec,
candidate: &'target Candidate,
local_spec: &AccessSpec<'_>,
candidate: &'target Candidate<'_>,
) -> Result<Option<AccessSpec<'target>>, RelocationError> {
let mut target_spec = AccessSpec {
btf: candidate.btf,
@ -466,7 +466,7 @@ fn match_candidate<'target>(
fn match_enum<'a>(
iterator: impl Iterator<Item = (usize, u32)>,
candidate: &Candidate,
candidate: &Candidate<'_>,
local_variant_name: &str,
target_id: u32,
mut target_spec: AccessSpec<'a>,
@ -672,7 +672,7 @@ impl<'a> AccessSpec<'a> {
root_type_id: u32,
spec: &str,
relocation: Relocation,
) -> Result<AccessSpec<'a>, RelocationError> {
) -> Result<Self, RelocationError> {
let parts = spec
.split(':')
.map(|s| s.parse::<usize>())
@ -895,21 +895,21 @@ fn poison_insn(ins: &mut bpf_insn) {
impl ComputedRelocation {
fn new(
rel: &Relocation,
local_spec: &AccessSpec,
target_spec: Option<&AccessSpec>,
) -> Result<ComputedRelocation, RelocationError> {
local_spec: &AccessSpec<'_>,
target_spec: Option<&AccessSpec<'_>>,
) -> Result<Self, RelocationError> {
use RelocationKind::*;
let ret = match rel.kind {
FieldByteOffset | FieldByteSize | FieldExists | FieldSigned | FieldLShift64
| FieldRShift64 => ComputedRelocation {
| FieldRShift64 => Self {
local: Self::compute_field_relocation(rel, Some(local_spec))?,
target: Self::compute_field_relocation(rel, target_spec).ok(),
},
TypeIdLocal | TypeIdTarget | TypeExists | TypeSize => ComputedRelocation {
TypeIdLocal | TypeIdTarget | TypeExists | TypeSize => Self {
local: Self::compute_type_relocation(rel, local_spec, target_spec)?,
target: Self::compute_type_relocation(rel, local_spec, target_spec).ok(),
},
EnumVariantExists | EnumVariantValue => ComputedRelocation {
EnumVariantExists | EnumVariantValue => Self {
local: Self::compute_enum_relocation(rel, Some(local_spec))?,
target: Self::compute_enum_relocation(rel, target_spec).ok(),
},
@ -959,7 +959,7 @@ impl ComputedRelocation {
return Ok(());
};
let class = (ins.code & 0x07) as u32;
let class = u32::from(ins.code & 0x07);
let target_value = target.value;
@ -1055,11 +1055,11 @@ impl ComputedRelocation {
fn compute_enum_relocation(
rel: &Relocation,
spec: Option<&AccessSpec>,
spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
let value = match (rel.kind, spec) {
(EnumVariantExists, spec) => spec.is_some() as u64,
(EnumVariantExists, spec) => u64::from(spec.is_some()),
(EnumVariantValue, Some(spec)) => {
let accessor = &spec.accessors[0];
match spec.btf.type_by_id(accessor.type_id)? {
@ -1068,12 +1068,12 @@ impl ComputedRelocation {
if en.is_signed() {
value as i32 as u64
} else {
value as u64
u64::from(value)
}
}
BtfType::Enum64(en) => {
let variant = &en.variants[accessor.index];
((variant.value_high as u64) << 32) | variant.value_low as u64
(u64::from(variant.value_high) << 32) | u64::from(variant.value_low)
}
// candidate selection ensures that rel_kind == local_kind == target_kind
_ => unreachable!(),
@ -1097,7 +1097,7 @@ impl ComputedRelocation {
fn compute_field_relocation(
rel: &Relocation,
spec: Option<&AccessSpec>,
spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
@ -1105,7 +1105,7 @@ impl ComputedRelocation {
// this is the bpf_preserve_field_info(member_access, FIELD_EXISTENCE) case. If we
// managed to build a spec, it means the field exists.
return Ok(ComputedRelocationValue {
value: spec.is_some() as u64,
value: u64::from(spec.is_some()),
size: 0,
type_id: None,
});
@ -1196,30 +1196,30 @@ impl ComputedRelocation {
match rel.kind {
FieldByteOffset => {
value.value = byte_off as u64;
value.value = u64::from(byte_off);
if !is_bitfield {
value.size = byte_size;
value.type_id = Some(member_type_id);
}
}
FieldByteSize => {
value.value = byte_size as u64;
value.value = u64::from(byte_size);
}
FieldSigned => match member_ty {
BtfType::Enum(en) => value.value = en.is_signed() as u64,
BtfType::Enum64(en) => value.value = en.is_signed() as u64,
BtfType::Enum(en) => value.value = u64::from(en.is_signed()),
BtfType::Enum64(en) => value.value = u64::from(en.is_signed()),
BtfType::Int(i) => value.value = i.encoding() as u64 & IntEncoding::Signed as u64,
_ => (),
},
FieldLShift64 => {
value.value = if cfg!(target_endian = "little") {
64 - (bit_off + bit_size - byte_off * 8) as u64
64 - u64::from(bit_off + bit_size - byte_off * 8)
} else {
((8 - byte_size) * 8 + (bit_off - byte_off * 8)) as u64
u64::from((8 - byte_size) * 8 + (bit_off - byte_off * 8))
}
}
FieldRShift64 => {
value.value = 64 - bit_size as u64;
value.value = 64 - u64::from(bit_size);
}
kind @ (FieldExists | TypeIdLocal | TypeIdTarget | TypeExists | TypeSize
| EnumVariantExists | EnumVariantValue) => {
@ -1232,15 +1232,15 @@ impl ComputedRelocation {
fn compute_type_relocation(
rel: &Relocation,
local_spec: &AccessSpec,
target_spec: Option<&AccessSpec>,
local_spec: &AccessSpec<'_>,
target_spec: Option<&AccessSpec<'_>>,
) -> Result<ComputedRelocationValue, RelocationError> {
use RelocationKind::*;
let value = match (rel.kind, target_spec) {
(TypeIdLocal, _) => local_spec.root_type_id as u64,
(TypeIdTarget, Some(target_spec)) => target_spec.root_type_id as u64,
(TypeExists, target_spec) => target_spec.is_some() as u64,
(TypeIdLocal, _) => u64::from(local_spec.root_type_id),
(TypeIdTarget, Some(target_spec)) => u64::from(target_spec.root_type_id),
(TypeExists, target_spec) => u64::from(target_spec.is_some()),
(TypeSize, Some(target_spec)) => {
target_spec.btf.type_size(target_spec.root_type_id)? as u64
}

@ -41,7 +41,7 @@ pub struct Fwd {
impl Fwd {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Fwd>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -63,7 +63,7 @@ pub struct Const {
impl Const {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Const>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -94,7 +94,7 @@ pub struct Volatile {
impl Volatile {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Volatile>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -115,7 +115,7 @@ pub struct Restrict {
impl Restrict {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
bytes_of::<Restrict>(self).to_vec()
bytes_of::<Self>(self).to_vec()
}
pub(crate) fn kind(&self) -> BtfKind {
@ -239,10 +239,10 @@ pub enum FuncLinkage {
impl From<u32> for FuncLinkage {
fn from(v: u32) -> Self {
match v {
0 => FuncLinkage::Static,
1 => FuncLinkage::Global,
2 => FuncLinkage::Extern,
_ => FuncLinkage::Unknown,
0 => Self::Static,
1 => Self::Global,
2 => Self::Extern,
_ => Self::Unknown,
}
}
}
@ -322,11 +322,11 @@ pub enum IntEncoding {
impl From<u32> for IntEncoding {
fn from(v: u32) -> Self {
match v {
0 => IntEncoding::None,
1 => IntEncoding::Signed,
2 => IntEncoding::Char,
4 => IntEncoding::Bool,
_ => IntEncoding::Unknown,
0 => Self::None,
1 => Self::Signed,
2 => Self::Char,
4 => Self::Bool,
_ => Self::Unknown,
}
}
}
@ -549,7 +549,7 @@ impl Enum64 {
info |= 1 << 31
};
info |= (variants.len() as u32) & 0xFFFF;
Enum64 {
Self {
name_offset,
info,
// According to the documentation:
@ -861,10 +861,10 @@ pub enum VarLinkage {
impl From<u32> for VarLinkage {
fn from(v: u32) -> Self {
match v {
0 => VarLinkage::Static,
1 => VarLinkage::Global,
2 => VarLinkage::Extern,
_ => VarLinkage::Unknown,
0 => Self::Static,
1 => Self::Global,
2 => Self::Extern,
_ => Self::Unknown,
}
}
}
@ -1088,26 +1088,26 @@ impl TryFrom<u32> for BtfKind {
impl Display for BtfKind {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
match self {
BtfKind::Unknown => write!(f, "[UNKNOWN]"),
BtfKind::Int => write!(f, "[INT]"),
BtfKind::Float => write!(f, "[FLOAT]"),
BtfKind::Ptr => write!(f, "[PTR]"),
BtfKind::Array => write!(f, "[ARRAY]"),
BtfKind::Struct => write!(f, "[STRUCT]"),
BtfKind::Union => write!(f, "[UNION]"),
BtfKind::Enum => write!(f, "[ENUM]"),
BtfKind::Fwd => write!(f, "[FWD]"),
BtfKind::Typedef => write!(f, "[TYPEDEF]"),
BtfKind::Volatile => write!(f, "[VOLATILE]"),
BtfKind::Const => write!(f, "[CONST]"),
BtfKind::Restrict => write!(f, "[RESTRICT]"),
BtfKind::Func => write!(f, "[FUNC]"),
BtfKind::FuncProto => write!(f, "[FUNC_PROTO]"),
BtfKind::Var => write!(f, "[VAR]"),
BtfKind::DataSec => write!(f, "[DATASEC]"),
BtfKind::DeclTag => write!(f, "[DECL_TAG]"),
BtfKind::TypeTag => write!(f, "[TYPE_TAG]"),
BtfKind::Enum64 => write!(f, "[ENUM64]"),
Self::Unknown => write!(f, "[UNKNOWN]"),
Self::Int => write!(f, "[INT]"),
Self::Float => write!(f, "[FLOAT]"),
Self::Ptr => write!(f, "[PTR]"),
Self::Array => write!(f, "[ARRAY]"),
Self::Struct => write!(f, "[STRUCT]"),
Self::Union => write!(f, "[UNION]"),
Self::Enum => write!(f, "[ENUM]"),
Self::Fwd => write!(f, "[FWD]"),
Self::Typedef => write!(f, "[TYPEDEF]"),
Self::Volatile => write!(f, "[VOLATILE]"),
Self::Const => write!(f, "[CONST]"),
Self::Restrict => write!(f, "[RESTRICT]"),
Self::Func => write!(f, "[FUNC]"),
Self::FuncProto => write!(f, "[FUNC_PROTO]"),
Self::Var => write!(f, "[VAR]"),
Self::DataSec => write!(f, "[DATASEC]"),
Self::DeclTag => write!(f, "[DECL_TAG]"),
Self::TypeTag => write!(f, "[TYPE_TAG]"),
Self::Enum64 => write!(f, "[ENUM64]"),
}
}
}
@ -1133,43 +1133,43 @@ unsafe fn read_array<T>(data: &[u8], len: usize) -> Result<Vec<T>, BtfError> {
}
impl BtfType {
pub(crate) unsafe fn read(data: &[u8], endianness: Endianness) -> Result<BtfType, BtfError> {
pub(crate) unsafe fn read(data: &[u8], endianness: Endianness) -> Result<Self, BtfError> {
let ty = unsafe { read_array::<u32>(data, 3)? };
let data = &data[mem::size_of::<u32>() * 3..];
let vlen = type_vlen(ty[1]);
Ok(match type_kind(ty[1])? {
BtfKind::Unknown => BtfType::Unknown,
BtfKind::Fwd => BtfType::Fwd(Fwd {
BtfKind::Unknown => Self::Unknown,
BtfKind::Fwd => Self::Fwd(Fwd {
name_offset: ty[0],
info: ty[1],
_unused: 0,
}),
BtfKind::Const => BtfType::Const(Const {
BtfKind::Const => Self::Const(Const {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Volatile => BtfType::Volatile(Volatile {
BtfKind::Volatile => Self::Volatile(Volatile {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Restrict => BtfType::Restrict(Restrict {
BtfKind::Restrict => Self::Restrict(Restrict {
name_offset: ty[0],
_info: ty[1],
btf_type: ty[2],
}),
BtfKind::Ptr => BtfType::Ptr(Ptr {
BtfKind::Ptr => Self::Ptr(Ptr {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Typedef => BtfType::Typedef(Typedef {
BtfKind::Typedef => Self::Typedef(Typedef {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
}),
BtfKind::Func => BtfType::Func(Func {
BtfKind::Func => Self::Func(Func {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
@ -1183,73 +1183,73 @@ impl BtfType {
} else {
u32::from_be_bytes
};
BtfType::Int(Int {
Self::Int(Int {
name_offset: ty[0],
info: ty[1],
size: ty[2],
data: read_u32(data[..mem::size_of::<u32>()].try_into().unwrap()),
})
}
BtfKind::Float => BtfType::Float(Float {
BtfKind::Float => Self::Float(Float {
name_offset: ty[0],
info: ty[1],
size: ty[2],
}),
BtfKind::Enum => BtfType::Enum(Enum {
BtfKind::Enum => Self::Enum(Enum {
name_offset: ty[0],
info: ty[1],
size: ty[2],
variants: unsafe { read_array::<BtfEnum>(data, vlen)? },
}),
BtfKind::Enum64 => BtfType::Enum64(Enum64 {
BtfKind::Enum64 => Self::Enum64(Enum64 {
name_offset: ty[0],
info: ty[1],
size: ty[2],
variants: unsafe { read_array::<BtfEnum64>(data, vlen)? },
}),
BtfKind::Array => BtfType::Array(Array {
BtfKind::Array => Self::Array(Array {
name_offset: ty[0],
info: ty[1],
_unused: 0,
array: unsafe { read(data)? },
}),
BtfKind::Struct => BtfType::Struct(Struct {
BtfKind::Struct => Self::Struct(Struct {
name_offset: ty[0],
info: ty[1],
size: ty[2],
members: unsafe { read_array::<BtfMember>(data, vlen)? },
}),
BtfKind::Union => BtfType::Union(Union {
BtfKind::Union => Self::Union(Union {
name_offset: ty[0],
info: ty[1],
size: ty[2],
members: unsafe { read_array::<BtfMember>(data, vlen)? },
}),
BtfKind::FuncProto => BtfType::FuncProto(FuncProto {
BtfKind::FuncProto => Self::FuncProto(FuncProto {
name_offset: ty[0],
info: ty[1],
return_type: ty[2],
params: unsafe { read_array::<BtfParam>(data, vlen)? },
}),
BtfKind::Var => BtfType::Var(Var {
BtfKind::Var => Self::Var(Var {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
linkage: unsafe { read(data)? },
}),
BtfKind::DataSec => BtfType::DataSec(DataSec {
BtfKind::DataSec => Self::DataSec(DataSec {
name_offset: ty[0],
info: ty[1],
size: ty[2],
entries: unsafe { read_array::<DataSecEntry>(data, vlen)? },
}),
BtfKind::DeclTag => BtfType::DeclTag(DeclTag {
BtfKind::DeclTag => Self::DeclTag(DeclTag {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
component_index: unsafe { read(data)? },
}),
BtfKind::TypeTag => BtfType::TypeTag(TypeTag {
BtfKind::TypeTag => Self::TypeTag(TypeTag {
name_offset: ty[0],
info: ty[1],
btf_type: ty[2],
@ -1259,163 +1259,163 @@ impl BtfType {
pub(crate) fn to_bytes(&self) -> Vec<u8> {
match self {
BtfType::Unknown => vec![],
BtfType::Fwd(t) => t.to_bytes(),
BtfType::Const(t) => t.to_bytes(),
BtfType::Volatile(t) => t.to_bytes(),
BtfType::Restrict(t) => t.to_bytes(),
BtfType::Ptr(t) => t.to_bytes(),
BtfType::Typedef(t) => t.to_bytes(),
BtfType::Func(t) => t.to_bytes(),
BtfType::Int(t) => t.to_bytes(),
BtfType::Float(t) => t.to_bytes(),
BtfType::Enum(t) => t.to_bytes(),
BtfType::Enum64(t) => t.to_bytes(),
BtfType::Array(t) => t.to_bytes(),
BtfType::Struct(t) => t.to_bytes(),
BtfType::Union(t) => t.to_bytes(),
BtfType::FuncProto(t) => t.to_bytes(),
BtfType::Var(t) => t.to_bytes(),
BtfType::DataSec(t) => t.to_bytes(),
BtfType::DeclTag(t) => t.to_bytes(),
BtfType::TypeTag(t) => t.to_bytes(),
Self::Unknown => vec![],
Self::Fwd(t) => t.to_bytes(),
Self::Const(t) => t.to_bytes(),
Self::Volatile(t) => t.to_bytes(),
Self::Restrict(t) => t.to_bytes(),
Self::Ptr(t) => t.to_bytes(),
Self::Typedef(t) => t.to_bytes(),
Self::Func(t) => t.to_bytes(),
Self::Int(t) => t.to_bytes(),
Self::Float(t) => t.to_bytes(),
Self::Enum(t) => t.to_bytes(),
Self::Enum64(t) => t.to_bytes(),
Self::Array(t) => t.to_bytes(),
Self::Struct(t) => t.to_bytes(),
Self::Union(t) => t.to_bytes(),
Self::FuncProto(t) => t.to_bytes(),
Self::Var(t) => t.to_bytes(),
Self::DataSec(t) => t.to_bytes(),
Self::DeclTag(t) => t.to_bytes(),
Self::TypeTag(t) => t.to_bytes(),
}
}
pub(crate) fn size(&self) -> Option<u32> {
match self {
BtfType::Int(t) => Some(t.size),
BtfType::Float(t) => Some(t.size),
BtfType::Enum(t) => Some(t.size),
BtfType::Enum64(t) => Some(t.size),
BtfType::Struct(t) => Some(t.size),
BtfType::Union(t) => Some(t.size),
BtfType::DataSec(t) => Some(t.size),
BtfType::Ptr(_) => Some(mem::size_of::<&()>() as u32),
Self::Int(t) => Some(t.size),
Self::Float(t) => Some(t.size),
Self::Enum(t) => Some(t.size),
Self::Enum64(t) => Some(t.size),
Self::Struct(t) => Some(t.size),
Self::Union(t) => Some(t.size),
Self::DataSec(t) => Some(t.size),
Self::Ptr(_) => Some(mem::size_of::<&()>() as u32),
_ => None,
}
}
pub(crate) fn btf_type(&self) -> Option<u32> {
match self {
BtfType::Const(t) => Some(t.btf_type),
BtfType::Volatile(t) => Some(t.btf_type),
BtfType::Restrict(t) => Some(t.btf_type),
BtfType::Ptr(t) => Some(t.btf_type),
BtfType::Typedef(t) => Some(t.btf_type),
Self::Const(t) => Some(t.btf_type),
Self::Volatile(t) => Some(t.btf_type),
Self::Restrict(t) => Some(t.btf_type),
Self::Ptr(t) => Some(t.btf_type),
Self::Typedef(t) => Some(t.btf_type),
// FuncProto contains the return type here, and doesn't directly reference another type
BtfType::FuncProto(t) => Some(t.return_type),
BtfType::Var(t) => Some(t.btf_type),
BtfType::DeclTag(t) => Some(t.btf_type),
BtfType::TypeTag(t) => Some(t.btf_type),
Self::FuncProto(t) => Some(t.return_type),
Self::Var(t) => Some(t.btf_type),
Self::DeclTag(t) => Some(t.btf_type),
Self::TypeTag(t) => Some(t.btf_type),
_ => None,
}
}
pub(crate) fn type_info_size(&self) -> usize {
match self {
BtfType::Unknown => mem::size_of::<Fwd>(),
BtfType::Fwd(t) => t.type_info_size(),
BtfType::Const(t) => t.type_info_size(),
BtfType::Volatile(t) => t.type_info_size(),
BtfType::Restrict(t) => t.type_info_size(),
BtfType::Ptr(t) => t.type_info_size(),
BtfType::Typedef(t) => t.type_info_size(),
BtfType::Func(t) => t.type_info_size(),
BtfType::Int(t) => t.type_info_size(),
BtfType::Float(t) => t.type_info_size(),
BtfType::Enum(t) => t.type_info_size(),
BtfType::Enum64(t) => t.type_info_size(),
BtfType::Array(t) => t.type_info_size(),
BtfType::Struct(t) => t.type_info_size(),
BtfType::Union(t) => t.type_info_size(),
BtfType::FuncProto(t) => t.type_info_size(),
BtfType::Var(t) => t.type_info_size(),
BtfType::DataSec(t) => t.type_info_size(),
BtfType::DeclTag(t) => t.type_info_size(),
BtfType::TypeTag(t) => t.type_info_size(),
Self::Unknown => mem::size_of::<Fwd>(),
Self::Fwd(t) => t.type_info_size(),
Self::Const(t) => t.type_info_size(),
Self::Volatile(t) => t.type_info_size(),
Self::Restrict(t) => t.type_info_size(),
Self::Ptr(t) => t.type_info_size(),
Self::Typedef(t) => t.type_info_size(),
Self::Func(t) => t.type_info_size(),
Self::Int(t) => t.type_info_size(),
Self::Float(t) => t.type_info_size(),
Self::Enum(t) => t.type_info_size(),
Self::Enum64(t) => t.type_info_size(),
Self::Array(t) => t.type_info_size(),
Self::Struct(t) => t.type_info_size(),
Self::Union(t) => t.type_info_size(),
Self::FuncProto(t) => t.type_info_size(),
Self::Var(t) => t.type_info_size(),
Self::DataSec(t) => t.type_info_size(),
Self::DeclTag(t) => t.type_info_size(),
Self::TypeTag(t) => t.type_info_size(),
}
}
pub(crate) fn name_offset(&self) -> u32 {
match self {
BtfType::Unknown => 0,
BtfType::Fwd(t) => t.name_offset,
BtfType::Const(t) => t.name_offset,
BtfType::Volatile(t) => t.name_offset,
BtfType::Restrict(t) => t.name_offset,
BtfType::Ptr(t) => t.name_offset,
BtfType::Typedef(t) => t.name_offset,
BtfType::Func(t) => t.name_offset,
BtfType::Int(t) => t.name_offset,
BtfType::Float(t) => t.name_offset,
BtfType::Enum(t) => t.name_offset,
BtfType::Enum64(t) => t.name_offset,
BtfType::Array(t) => t.name_offset,
BtfType::Struct(t) => t.name_offset,
BtfType::Union(t) => t.name_offset,
BtfType::FuncProto(t) => t.name_offset,
BtfType::Var(t) => t.name_offset,
BtfType::DataSec(t) => t.name_offset,
BtfType::DeclTag(t) => t.name_offset,
BtfType::TypeTag(t) => t.name_offset,
Self::Unknown => 0,
Self::Fwd(t) => t.name_offset,
Self::Const(t) => t.name_offset,
Self::Volatile(t) => t.name_offset,
Self::Restrict(t) => t.name_offset,
Self::Ptr(t) => t.name_offset,
Self::Typedef(t) => t.name_offset,
Self::Func(t) => t.name_offset,
Self::Int(t) => t.name_offset,
Self::Float(t) => t.name_offset,
Self::Enum(t) => t.name_offset,
Self::Enum64(t) => t.name_offset,
Self::Array(t) => t.name_offset,
Self::Struct(t) => t.name_offset,
Self::Union(t) => t.name_offset,
Self::FuncProto(t) => t.name_offset,
Self::Var(t) => t.name_offset,
Self::DataSec(t) => t.name_offset,
Self::DeclTag(t) => t.name_offset,
Self::TypeTag(t) => t.name_offset,
}
}
pub(crate) fn kind(&self) -> BtfKind {
match self {
BtfType::Unknown => BtfKind::Unknown,
BtfType::Fwd(t) => t.kind(),
BtfType::Const(t) => t.kind(),
BtfType::Volatile(t) => t.kind(),
BtfType::Restrict(t) => t.kind(),
BtfType::Ptr(t) => t.kind(),
BtfType::Typedef(t) => t.kind(),
BtfType::Func(t) => t.kind(),
BtfType::Int(t) => t.kind(),
BtfType::Float(t) => t.kind(),
BtfType::Enum(t) => t.kind(),
BtfType::Enum64(t) => t.kind(),
BtfType::Array(t) => t.kind(),
BtfType::Struct(t) => t.kind(),
BtfType::Union(t) => t.kind(),
BtfType::FuncProto(t) => t.kind(),
BtfType::Var(t) => t.kind(),
BtfType::DataSec(t) => t.kind(),
BtfType::DeclTag(t) => t.kind(),
BtfType::TypeTag(t) => t.kind(),
Self::Unknown => BtfKind::Unknown,
Self::Fwd(t) => t.kind(),
Self::Const(t) => t.kind(),
Self::Volatile(t) => t.kind(),
Self::Restrict(t) => t.kind(),
Self::Ptr(t) => t.kind(),
Self::Typedef(t) => t.kind(),
Self::Func(t) => t.kind(),
Self::Int(t) => t.kind(),
Self::Float(t) => t.kind(),
Self::Enum(t) => t.kind(),
Self::Enum64(t) => t.kind(),
Self::Array(t) => t.kind(),
Self::Struct(t) => t.kind(),
Self::Union(t) => t.kind(),
Self::FuncProto(t) => t.kind(),
Self::Var(t) => t.kind(),
Self::DataSec(t) => t.kind(),
Self::DeclTag(t) => t.kind(),
Self::TypeTag(t) => t.kind(),
}
}
pub(crate) fn is_composite(&self) -> bool {
matches!(self, BtfType::Struct(_) | BtfType::Union(_))
matches!(self, Self::Struct(_) | Self::Union(_))
}
pub(crate) fn members(&self) -> Option<impl Iterator<Item = &BtfMember>> {
match self {
BtfType::Struct(t) => Some(t.members.iter()),
BtfType::Union(t) => Some(t.members.iter()),
Self::Struct(t) => Some(t.members.iter()),
Self::Union(t) => Some(t.members.iter()),
_ => None,
}
}
pub(crate) fn member_bit_field_size(&self, member: &BtfMember) -> Option<usize> {
match self {
BtfType::Struct(t) => Some(t.member_bit_field_size(member)),
BtfType::Union(t) => Some(t.member_bit_field_size(member)),
Self::Struct(t) => Some(t.member_bit_field_size(member)),
Self::Union(t) => Some(t.member_bit_field_size(member)),
_ => None,
}
}
pub(crate) fn member_bit_offset(&self, member: &BtfMember) -> Option<usize> {
match self {
BtfType::Struct(t) => Some(t.member_bit_offset(member)),
BtfType::Union(t) => Some(t.member_bit_offset(member)),
Self::Struct(t) => Some(t.member_bit_offset(member)),
Self::Union(t) => Some(t.member_bit_offset(member)),
_ => None,
}
}
pub(crate) fn is_compatible(&self, other: &BtfType) -> bool {
pub(crate) fn is_compatible(&self, other: &Self) -> bool {
if self.kind() == other.kind() {
return true;
}
@ -1837,7 +1837,7 @@ mod tests {
}
#[test]
pub fn test_read_btf_type_enum64() {
fn test_read_btf_type_enum64() {
let endianness = Endianness::default();
let variants = vec![BtfEnum64::new(0, 0xbbbbbbbbaaaaaaaau64)];
let bpf_type = BtfType::Enum64(Enum64::new(0, false, variants));

@ -49,7 +49,7 @@
//! let instructions = &function.instructions;
//! let data = unsafe {
//! core::slice::from_raw_parts(
//! instructions.as_ptr() as *const u8,
//! instructions.as_ptr().cast(),
//! instructions.len() * core::mem::size_of::<bpf_insn>(),
//! )
//! };
@ -65,7 +65,11 @@
html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(clippy::all, missing_docs)]
#![deny(missing_docs)]
#![cfg_attr(
any(feature = "std", test),
expect(unused_crate_dependencies, reason = "used in doctests")
)]
extern crate alloc;
#[cfg(feature = "std")]
@ -74,9 +78,15 @@ extern crate std;
pub mod btf;
#[expect(
clippy::all,
clippy::cast_lossless,
clippy::ptr_as_ptr,
clippy::ref_as_ptr,
clippy::use_self,
missing_docs,
non_camel_case_types,
non_snake_case,
trivial_numeric_casts,
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
pub mod generated;
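
The suppressions here use `expect` rather than `allow` (stable since Rust 1.81): if a listed lint stops firing on the generated module, `unfulfilled_lint_expectations` reports the stale entry, so the list is self-cleaning. A small sketch of the mechanism:

```rust
// If the lint below ever stops triggering, the compiler flags this
// attribute via unfulfilled_lint_expectations instead of staying silent.
#[expect(clippy::needless_lifetimes, reason = "kept for illustration")]
fn first_word<'a>(s: &'a str) -> &'a str {
    s.split(' ').next().unwrap_or(s)
}
```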

@ -99,8 +99,8 @@ impl TryFrom<u32> for PinningType {
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(PinningType::None),
1 => Ok(PinningType::ByName),
0 => Ok(Self::None),
1 => Ok(Self::ByName),
pinning_type => Err(PinningError::Unsupported { pinning_type }),
}
}
@ -144,96 +144,96 @@ impl Map {
/// Returns the map type
pub fn map_type(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.map_type,
Map::Btf(m) => m.def.map_type,
Self::Legacy(m) => m.def.map_type,
Self::Btf(m) => m.def.map_type,
}
}
/// Returns the key size in bytes
pub fn key_size(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.key_size,
Map::Btf(m) => m.def.key_size,
Self::Legacy(m) => m.def.key_size,
Self::Btf(m) => m.def.key_size,
}
}
/// Returns the value size in bytes
pub fn value_size(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.value_size,
Map::Btf(m) => m.def.value_size,
Self::Legacy(m) => m.def.value_size,
Self::Btf(m) => m.def.value_size,
}
}
/// Set the value size in bytes
pub fn set_value_size(&mut self, size: u32) {
match self {
Map::Legacy(m) => m.def.value_size = size,
Map::Btf(m) => m.def.value_size = size,
Self::Legacy(m) => m.def.value_size = size,
Self::Btf(m) => m.def.value_size = size,
}
}
/// Returns the max entry number
pub fn max_entries(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.max_entries,
Map::Btf(m) => m.def.max_entries,
Self::Legacy(m) => m.def.max_entries,
Self::Btf(m) => m.def.max_entries,
}
}
/// Sets the max entry number
pub fn set_max_entries(&mut self, v: u32) {
match self {
Map::Legacy(m) => m.def.max_entries = v,
Map::Btf(m) => m.def.max_entries = v,
Self::Legacy(m) => m.def.max_entries = v,
Self::Btf(m) => m.def.max_entries = v,
}
}
/// Returns the map flags
pub fn map_flags(&self) -> u32 {
match self {
Map::Legacy(m) => m.def.map_flags,
Map::Btf(m) => m.def.map_flags,
Self::Legacy(m) => m.def.map_flags,
Self::Btf(m) => m.def.map_flags,
}
}
/// Returns the pinning type of the map
pub fn pinning(&self) -> PinningType {
match self {
Map::Legacy(m) => m.def.pinning,
Map::Btf(m) => m.def.pinning,
Self::Legacy(m) => m.def.pinning,
Self::Btf(m) => m.def.pinning,
}
}
/// Returns the map data
pub fn data(&self) -> &[u8] {
match self {
Map::Legacy(m) => &m.data,
Map::Btf(m) => &m.data,
Self::Legacy(m) => &m.data,
Self::Btf(m) => &m.data,
}
}
/// Returns the map data as mutable
pub fn data_mut(&mut self) -> &mut Vec<u8> {
match self {
Map::Legacy(m) => m.data.as_mut(),
Map::Btf(m) => m.data.as_mut(),
Self::Legacy(m) => m.data.as_mut(),
Self::Btf(m) => m.data.as_mut(),
}
}
/// Returns the section index
pub fn section_index(&self) -> usize {
match self {
Map::Legacy(m) => m.section_index,
Map::Btf(m) => m.section_index,
Self::Legacy(m) => m.section_index,
Self::Btf(m) => m.section_index,
}
}
/// Returns the section kind.
pub fn section_kind(&self) -> EbpfSectionKind {
match self {
Map::Legacy(m) => m.section_kind,
Map::Btf(_) => EbpfSectionKind::BtfMaps,
Self::Legacy(m) => m.section_kind,
Self::Btf(_) => EbpfSectionKind::BtfMaps,
}
}
@ -243,8 +243,8 @@ impl Map {
/// need symbols in order to be relocated.
pub fn symbol_index(&self) -> Option<usize> {
match self {
Map::Legacy(m) => m.symbol_index,
Map::Btf(m) => Some(m.symbol_index),
Self::Legacy(m) => m.symbol_index,
Self::Btf(m) => Some(m.symbol_index),
}
}
}

@ -280,9 +280,7 @@ pub enum ProgramSection {
impl FromStr for ProgramSection {
type Err = ParseError;
fn from_str(section: &str) -> Result<ProgramSection, ParseError> {
use ProgramSection::*;
fn from_str(section: &str) -> Result<Self, ParseError> {
// parse the common case, eg "xdp/program_name" or
// "sk_skb/stream_verdict/program_name"
let mut pieces = section.split('/');
@ -296,13 +294,13 @@ impl FromStr for ProgramSection {
let kind = next()?;
Ok(match kind {
"kprobe" => KProbe,
"kretprobe" => KRetProbe,
"uprobe" => UProbe { sleepable: false },
"uprobe.s" => UProbe { sleepable: true },
"uretprobe" => URetProbe { sleepable: false },
"uretprobe.s" => URetProbe { sleepable: true },
"xdp" | "xdp.frags" => Xdp {
"kprobe" => Self::KProbe,
"kretprobe" => Self::KRetProbe,
"uprobe" => Self::UProbe { sleepable: false },
"uprobe.s" => Self::UProbe { sleepable: true },
"uretprobe" => Self::URetProbe { sleepable: false },
"uretprobe.s" => Self::URetProbe { sleepable: true },
"xdp" | "xdp.frags" => Self::Xdp {
frags: kind == "xdp.frags",
attach_type: match pieces.next() {
None => XdpAttachType::Interface,
@ -315,15 +313,15 @@ impl FromStr for ProgramSection {
}
},
},
"tp_btf" => BtfTracePoint,
"tracepoint" | "tp" => TracePoint,
"socket" => SocketFilter,
"sk_msg" => SkMsg,
"tp_btf" => Self::BtfTracePoint,
"tracepoint" | "tp" => Self::TracePoint,
"socket" => Self::SocketFilter,
"sk_msg" => Self::SkMsg,
"sk_skb" => {
let name = next()?;
match name {
"stream_parser" => SkSkbStreamParser,
"stream_verdict" => SkSkbStreamVerdict,
"stream_parser" => Self::SkSkbStreamParser,
"stream_verdict" => Self::SkSkbStreamVerdict,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
@ -331,13 +329,13 @@ impl FromStr for ProgramSection {
}
}
}
"sockops" => SockOps,
"classifier" => SchedClassifier,
"sockops" => Self::SockOps,
"classifier" => Self::SchedClassifier,
"cgroup_skb" => {
let name = next()?;
match name {
"ingress" => CgroupSkbIngress,
"egress" => CgroupSkbEgress,
"ingress" => Self::CgroupSkbIngress,
"egress" => Self::CgroupSkbEgress,
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
@ -348,64 +346,64 @@ impl FromStr for ProgramSection {
"cgroup" => {
let name = next()?;
match name {
"skb" => CgroupSkb,
"sysctl" => CgroupSysctl,
"dev" => CgroupDevice,
"getsockopt" => CgroupSockopt {
"skb" => Self::CgroupSkb,
"sysctl" => Self::CgroupSysctl,
"dev" => Self::CgroupDevice,
"getsockopt" => Self::CgroupSockopt {
attach_type: CgroupSockoptAttachType::Get,
},
"setsockopt" => CgroupSockopt {
"setsockopt" => Self::CgroupSockopt {
attach_type: CgroupSockoptAttachType::Set,
},
"sock" => CgroupSock {
"sock" => Self::CgroupSock {
attach_type: CgroupSockAttachType::default(),
},
"post_bind4" => CgroupSock {
"post_bind4" => Self::CgroupSock {
attach_type: CgroupSockAttachType::PostBind4,
},
"post_bind6" => CgroupSock {
"post_bind6" => Self::CgroupSock {
attach_type: CgroupSockAttachType::PostBind6,
},
"sock_create" => CgroupSock {
"sock_create" => Self::CgroupSock {
attach_type: CgroupSockAttachType::SockCreate,
},
"sock_release" => CgroupSock {
"sock_release" => Self::CgroupSock {
attach_type: CgroupSockAttachType::SockRelease,
},
"bind4" => CgroupSockAddr {
"bind4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind4,
},
"bind6" => CgroupSockAddr {
"bind6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Bind6,
},
"connect4" => CgroupSockAddr {
"connect4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect4,
},
"connect6" => CgroupSockAddr {
"connect6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::Connect6,
},
"getpeername4" => CgroupSockAddr {
"getpeername4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName4,
},
"getpeername6" => CgroupSockAddr {
"getpeername6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetPeerName6,
},
"getsockname4" => CgroupSockAddr {
"getsockname4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName4,
},
"getsockname6" => CgroupSockAddr {
"getsockname6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::GetSockName6,
},
"sendmsg4" => CgroupSockAddr {
"sendmsg4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg4,
},
"sendmsg6" => CgroupSockAddr {
"sendmsg6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPSendMsg6,
},
"recvmsg4" => CgroupSockAddr {
"recvmsg4" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg4,
},
"recvmsg6" => CgroupSockAddr {
"recvmsg6" => Self::CgroupSockAddr {
attach_type: CgroupSockAddrAttachType::UDPRecvMsg6,
},
_ => {
@ -415,20 +413,20 @@ impl FromStr for ProgramSection {
}
}
}
"lirc_mode2" => LircMode2,
"perf_event" => PerfEvent,
"raw_tp" | "raw_tracepoint" => RawTracePoint,
"lsm" => Lsm { sleepable: false },
"lsm.s" => Lsm { sleepable: true },
"fentry" => FEntry { sleepable: false },
"fentry.s" => FEntry { sleepable: true },
"fexit" => FExit { sleepable: false },
"fexit.s" => FExit { sleepable: true },
"flow_dissector" => FlowDissector,
"freplace" => Extension,
"sk_lookup" => SkLookup,
"iter" => Iter { sleepable: false },
"iter.s" => Iter { sleepable: true },
"lirc_mode2" => Self::LircMode2,
"perf_event" => Self::PerfEvent,
"raw_tp" | "raw_tracepoint" => Self::RawTracePoint,
"lsm" => Self::Lsm { sleepable: false },
"lsm.s" => Self::Lsm { sleepable: true },
"fentry" => Self::FEntry { sleepable: false },
"fentry.s" => Self::FEntry { sleepable: true },
"fexit" => Self::FExit { sleepable: false },
"fexit.s" => Self::FExit { sleepable: true },
"flow_dissector" => Self::FlowDissector,
"freplace" => Self::Extension,
"sk_lookup" => Self::SkLookup,
"iter" => Self::Iter { sleepable: false },
"iter.s" => Self::Iter { sleepable: true },
_ => {
return Err(ParseError::InvalidProgramSection {
section: section.to_owned(),
@ -440,7 +438,7 @@ impl FromStr for ProgramSection {
impl Object {
/// Parses the binary data as an object file into an [Object]
pub fn parse(data: &[u8]) -> Result<Object, ParseError> {
pub fn parse(data: &[u8]) -> Result<Self, ParseError> {
let obj = object::read::File::parse(data).map_err(ParseError::ElfError)?;
let endianness = obj.endianness();
@ -456,7 +454,7 @@ impl Object {
None
};
let mut bpf_obj = Object::new(endianness, license, kernel_version);
let mut bpf_obj = Self::new(endianness, license, kernel_version);
if let Some(symbol_table) = obj.symbol_table() {
for symbol in symbol_table.symbols() {
@ -511,8 +509,8 @@ impl Object {
Ok(bpf_obj)
}
fn new(endianness: Endianness, license: CString, kernel_version: Option<u32>) -> Object {
Object {
fn new(endianness: Endianness, license: CString, kernel_version: Option<u32>) -> Self {
Self {
endianness,
license,
kernel_version,
@ -578,13 +576,13 @@ impl Object {
Ok(())
}
fn parse_btf(&mut self, section: &Section) -> Result<(), BtfError> {
fn parse_btf(&mut self, section: &Section<'_>) -> Result<(), BtfError> {
self.btf = Some(Btf::parse(section.data, self.endianness)?);
Ok(())
}
fn parse_btf_ext(&mut self, section: &Section) -> Result<(), BtfError> {
fn parse_btf_ext(&mut self, section: &Section<'_>) -> Result<(), BtfError> {
self.btf_ext = Some(BtfExt::parse(
section.data,
self.endianness,
@ -593,7 +591,7 @@ impl Object {
Ok(())
}
fn parse_programs(&mut self, section: &Section) -> Result<(), ParseError> {
fn parse_programs(&mut self, section: &Section<'_>) -> Result<(), ParseError> {
let program_section = ProgramSection::from_str(section.name)?;
let syms =
self.symbols_by_section
@ -624,7 +622,7 @@ impl Object {
fn parse_program(
&self,
section: &Section,
section: &Section<'_>,
program_section: ProgramSection,
name: String,
symbol: &Symbol,
@ -660,7 +658,7 @@ impl Object {
))
}
fn parse_text_section(&mut self, section: Section) -> Result<(), ParseError> {
fn parse_text_section(&mut self, section: Section<'_>) -> Result<(), ParseError> {
let mut symbols_by_address = HashMap::new();
for sym in self.symbol_table.values() {
@ -731,7 +729,7 @@ impl Object {
Ok(())
}
fn parse_btf_maps(&mut self, section: &Section) -> Result<(), ParseError> {
fn parse_btf_maps(&mut self, section: &Section<'_>) -> Result<(), ParseError> {
if self.btf.is_none() {
return Err(ParseError::NoBTF);
}
@ -786,7 +784,7 @@ impl Object {
fn parse_maps_section<'a, I: Iterator<Item = &'a usize>>(
&self,
maps: &mut HashMap<String, Map>,
section: &Section,
section: &Section<'_>,
symbols: I,
) -> Result<(), ParseError> {
let mut have_symbols = false;
@ -824,7 +822,7 @@ impl Object {
Ok(())
}
fn parse_section(&mut self, section: Section) -> Result<(), ParseError> {
fn parse_section(&mut self, section: Section<'_>) -> Result<(), ParseError> {
self.section_infos
.insert(section.name.to_owned(), (section.index, section.size));
match section.kind {
@ -886,9 +884,9 @@ impl Object {
}
fn insn_is_helper_call(ins: &bpf_insn) -> bool {
let klass = (ins.code & 0x07) as u32;
let op = (ins.code & 0xF0) as u32;
let src = (ins.code & 0x08) as u32;
let klass = u32::from(ins.code & 0x07);
let op = u32::from(ins.code & 0xF0);
let src = u32::from(ins.code & 0x08);
klass == BPF_JMP && op == BPF_CALL && src == BPF_K && ins.src_reg() == 0 && ins.dst_reg() == 0
}
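
For reference, the masks above decompose the eBPF opcode byte: the low three bits are the instruction class, bit 3 selects the source operand, and the high four bits are the operation. A quick self-contained check with the kernel's constant values (a helper call encodes as 0x85):

```rust
// Values from the kernel's BPF encoding: BPF_JMP = 0x05, BPF_CALL = 0x80,
// BPF_K = 0x00 (immediate operand).
const BPF_JMP: u32 = 0x05;
const BPF_CALL: u32 = 0x80;
const BPF_K: u32 = 0x00;

fn main() {
    let code: u8 = 0x85; // call instruction
    assert_eq!(u32::from(code & 0x07), BPF_JMP); // class
    assert_eq!(u32::from(code & 0xF0), BPF_CALL); // operation
    assert_eq!(u32::from(code & 0x08), BPF_K); // source
}
```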
@ -1029,29 +1027,29 @@ pub enum EbpfSectionKind {
}
impl EbpfSectionKind {
fn from_name(name: &str) -> EbpfSectionKind {
fn from_name(name: &str) -> Self {
if name.starts_with("license") {
EbpfSectionKind::License
Self::License
} else if name.starts_with("version") {
EbpfSectionKind::Version
Self::Version
} else if name.starts_with("maps") {
EbpfSectionKind::Maps
Self::Maps
} else if name.starts_with(".maps") {
EbpfSectionKind::BtfMaps
Self::BtfMaps
} else if name.starts_with(".text") {
EbpfSectionKind::Text
Self::Text
} else if name.starts_with(".bss") {
EbpfSectionKind::Bss
Self::Bss
} else if name.starts_with(".data") {
EbpfSectionKind::Data
Self::Data
} else if name.starts_with(".rodata") {
EbpfSectionKind::Rodata
Self::Rodata
} else if name == ".BTF" {
EbpfSectionKind::Btf
Self::Btf
} else if name == ".BTF.ext" {
EbpfSectionKind::BtfExt
Self::BtfExt
} else {
EbpfSectionKind::Undefined
Self::Undefined
}
}
}
@ -1070,7 +1068,7 @@ struct Section<'a> {
impl<'a> TryFrom<&'a ObjSection<'_, '_>> for Section<'a> {
type Error = ParseError;
fn try_from(section: &'a ObjSection) -> Result<Section<'a>, ParseError> {
fn try_from(section: &'a ObjSection<'_, '_>) -> Result<Self, ParseError> {
let index = section.index();
let map_err = |error| ParseError::SectionError {
index: index.0,
@ -1177,7 +1175,7 @@ fn get_map_field(btf: &Btf, type_id: u32) -> Result<u32, BtfError> {
// Parsed '.bss' '.data' and '.rodata' sections. These sections are arrays of
// bytes and are relocated based on their section index.
fn parse_data_map_section(section: &Section) -> Result<Map, ParseError> {
fn parse_data_map_section(section: &Section<'_>) -> Result<Map, ParseError> {
let (def, data) = match section.kind {
EbpfSectionKind::Data | EbpfSectionKind::Rodata => {
let def = bpf_map_def {
@ -1229,13 +1227,12 @@ fn parse_map_def(name: &str, data: &[u8]) -> Result<bpf_map_def, ParseError> {
if data.len() < mem::size_of::<bpf_map_def>() {
let mut map_def = bpf_map_def::default();
unsafe {
let map_def_ptr =
from_raw_parts_mut(&mut map_def as *mut bpf_map_def as *mut u8, data.len());
let map_def_ptr = from_raw_parts_mut(ptr::from_mut(&mut map_def).cast(), data.len());
map_def_ptr.copy_from_slice(data);
}
Ok(map_def)
} else {
Ok(unsafe { ptr::read_unaligned(data.as_ptr() as *const bpf_map_def) })
Ok(unsafe { ptr::read_unaligned(data.as_ptr().cast()) })
}
}
@ -1403,7 +1400,7 @@ pub fn copy_instructions(data: &[u8]) -> Result<Vec<bpf_insn>, ParseError> {
}
let instructions = data
.chunks_exact(mem::size_of::<bpf_insn>())
.map(|d| unsafe { ptr::read_unaligned(d.as_ptr() as *const bpf_insn) })
.map(|d| unsafe { ptr::read_unaligned(d.as_ptr().cast()) })
.collect::<Vec<_>>();
Ok(instructions)
}
@ -1411,7 +1408,7 @@ pub fn copy_instructions(data: &[u8]) -> Result<Vec<bpf_insn>, ParseError> {
fn get_func_and_line_info(
btf_ext: Option<&BtfExt>,
symbol: &Symbol,
section: &Section,
section: &Section<'_>,
offset: usize,
rewrite_insn_off: bool,
) -> (FuncSecInfo, LineSecInfo, usize, usize) {

@ -16,12 +16,12 @@ pub enum CgroupSockAttachType {
}
impl From<CgroupSockAttachType> for bpf_attach_type {
fn from(s: CgroupSockAttachType) -> bpf_attach_type {
fn from(s: CgroupSockAttachType) -> Self {
match s {
CgroupSockAttachType::PostBind4 => bpf_attach_type::BPF_CGROUP_INET4_POST_BIND,
CgroupSockAttachType::PostBind6 => bpf_attach_type::BPF_CGROUP_INET6_POST_BIND,
CgroupSockAttachType::SockCreate => bpf_attach_type::BPF_CGROUP_INET_SOCK_CREATE,
CgroupSockAttachType::SockRelease => bpf_attach_type::BPF_CGROUP_INET_SOCK_RELEASE,
CgroupSockAttachType::PostBind4 => Self::BPF_CGROUP_INET4_POST_BIND,
CgroupSockAttachType::PostBind6 => Self::BPF_CGROUP_INET6_POST_BIND,
CgroupSockAttachType::SockCreate => Self::BPF_CGROUP_INET_SOCK_CREATE,
CgroupSockAttachType::SockRelease => Self::BPF_CGROUP_INET_SOCK_RELEASE,
}
}
}

@ -31,20 +31,20 @@ pub enum CgroupSockAddrAttachType {
}
impl From<CgroupSockAddrAttachType> for bpf_attach_type {
fn from(s: CgroupSockAddrAttachType) -> bpf_attach_type {
fn from(s: CgroupSockAddrAttachType) -> Self {
match s {
CgroupSockAddrAttachType::Bind4 => bpf_attach_type::BPF_CGROUP_INET4_BIND,
CgroupSockAddrAttachType::Bind6 => bpf_attach_type::BPF_CGROUP_INET6_BIND,
CgroupSockAddrAttachType::Connect4 => bpf_attach_type::BPF_CGROUP_INET4_CONNECT,
CgroupSockAddrAttachType::Connect6 => bpf_attach_type::BPF_CGROUP_INET6_CONNECT,
CgroupSockAddrAttachType::GetPeerName4 => bpf_attach_type::BPF_CGROUP_INET4_GETPEERNAME,
CgroupSockAddrAttachType::GetPeerName6 => bpf_attach_type::BPF_CGROUP_INET6_GETPEERNAME,
CgroupSockAddrAttachType::GetSockName4 => bpf_attach_type::BPF_CGROUP_INET4_GETSOCKNAME,
CgroupSockAddrAttachType::GetSockName6 => bpf_attach_type::BPF_CGROUP_INET6_GETSOCKNAME,
CgroupSockAddrAttachType::UDPSendMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_SENDMSG,
CgroupSockAddrAttachType::UDPSendMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_SENDMSG,
CgroupSockAddrAttachType::UDPRecvMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_RECVMSG,
CgroupSockAddrAttachType::UDPRecvMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_RECVMSG,
CgroupSockAddrAttachType::Bind4 => Self::BPF_CGROUP_INET4_BIND,
CgroupSockAddrAttachType::Bind6 => Self::BPF_CGROUP_INET6_BIND,
CgroupSockAddrAttachType::Connect4 => Self::BPF_CGROUP_INET4_CONNECT,
CgroupSockAddrAttachType::Connect6 => Self::BPF_CGROUP_INET6_CONNECT,
CgroupSockAddrAttachType::GetPeerName4 => Self::BPF_CGROUP_INET4_GETPEERNAME,
CgroupSockAddrAttachType::GetPeerName6 => Self::BPF_CGROUP_INET6_GETPEERNAME,
CgroupSockAddrAttachType::GetSockName4 => Self::BPF_CGROUP_INET4_GETSOCKNAME,
CgroupSockAddrAttachType::GetSockName6 => Self::BPF_CGROUP_INET6_GETSOCKNAME,
CgroupSockAddrAttachType::UDPSendMsg4 => Self::BPF_CGROUP_UDP4_SENDMSG,
CgroupSockAddrAttachType::UDPSendMsg6 => Self::BPF_CGROUP_UDP6_SENDMSG,
CgroupSockAddrAttachType::UDPRecvMsg4 => Self::BPF_CGROUP_UDP4_RECVMSG,
CgroupSockAddrAttachType::UDPRecvMsg6 => Self::BPF_CGROUP_UDP6_RECVMSG,
}
}
}
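
One subtlety of `use_self` in these `From` impls: `Self` names the target type (the generated `bpf_attach_type` enum), not the Rust-side enum being matched. A reduced sketch with hypothetical variants:

```rust
#[allow(dead_code)]
enum Direction {
    Ingress,
    Egress,
}

#[allow(non_camel_case_types, dead_code)]
enum bpf_attach_type {
    BPF_CGROUP_INET_INGRESS,
    BPF_CGROUP_INET_EGRESS,
}

impl From<Direction> for bpf_attach_type {
    // Self here is bpf_attach_type, the type being implemented for.
    fn from(d: Direction) -> Self {
        match d {
            Direction::Ingress => Self::BPF_CGROUP_INET_INGRESS,
            Direction::Egress => Self::BPF_CGROUP_INET_EGRESS,
        }
    }
}
```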

@ -11,10 +11,10 @@ pub enum CgroupSockoptAttachType {
}
impl From<CgroupSockoptAttachType> for bpf_attach_type {
fn from(s: CgroupSockoptAttachType) -> bpf_attach_type {
fn from(s: CgroupSockoptAttachType) -> Self {
match s {
CgroupSockoptAttachType::Get => bpf_attach_type::BPF_CGROUP_GETSOCKOPT,
CgroupSockoptAttachType::Set => bpf_attach_type::BPF_CGROUP_SETSOCKOPT,
CgroupSockoptAttachType::Get => Self::BPF_CGROUP_GETSOCKOPT,
CgroupSockoptAttachType::Set => Self::BPF_CGROUP_SETSOCKOPT,
}
}
}

@ -16,9 +16,9 @@ pub enum XdpAttachType {
impl From<XdpAttachType> for bpf_attach_type {
fn from(value: XdpAttachType) -> Self {
match value {
XdpAttachType::Interface => bpf_attach_type::BPF_XDP,
XdpAttachType::CpuMap => bpf_attach_type::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => bpf_attach_type::BPF_XDP_DEVMAP,
XdpAttachType::Interface => Self::BPF_XDP,
XdpAttachType::CpuMap => Self::BPF_XDP_CPUMAP,
XdpAttachType::DevMap => Self::BPF_XDP_DEVMAP,
}
}
}

@ -289,8 +289,8 @@ impl<'a> FunctionLinker<'a> {
relocations: &'a HashMap<SectionIndex, HashMap<u64, Relocation>>,
symbol_table: &'a HashMap<usize, Symbol>,
text_sections: &'a HashSet<usize>,
) -> FunctionLinker<'a> {
FunctionLinker {
) -> Self {
Self {
functions,
linked_functions: HashMap::new(),
relocations,
@ -403,7 +403,7 @@ impl<'a> FunctionLinker<'a> {
fun.section_index.0,
(fun.section_offset as i64
+ ((ins_index - start_ins) as i64) * ins_size
+ (ins.imm + 1) as i64 * ins_size) as u64,
+ i64::from(ins.imm + 1) * ins_size) as u64,
)
};
@ -488,14 +488,14 @@ impl<'a> FunctionLinker<'a> {
}
fn insn_is_call(ins: &bpf_insn) -> bool {
let klass = (ins.code & 0x07) as u32;
let op = (ins.code & 0xF0) as u32;
let src = (ins.code & 0x08) as u32;
let klass = u32::from(ins.code & 0x07);
let op = u32::from(ins.code & 0xF0);
let src = u32::from(ins.code & 0x08);
klass == BPF_JMP
&& op == BPF_CALL
&& src == BPF_K
&& ins.src_reg() as u32 == BPF_PSEUDO_CALL
&& u32::from(ins.src_reg()) == BPF_PSEUDO_CALL
&& ins.dst_reg() == 0
&& ins.off == 0
}
@ -520,7 +520,7 @@ mod test {
}
fn ins(bytes: &[u8]) -> bpf_insn {
unsafe { core::ptr::read_unaligned(bytes.as_ptr() as *const _) }
unsafe { core::ptr::read_unaligned(bytes.as_ptr().cast()) }
}
fn fake_legacy_map(symbol_index: usize) -> Map {

@ -1,4 +1,4 @@
use core::{mem, slice};
use core::{mem, ptr, slice};
#[cfg(feature = "std")]
pub(crate) use std::collections::HashMap;
#[cfg(feature = "std")]
@ -11,6 +11,5 @@ pub(crate) use hashbrown::HashSet;
/// bytes_of converts a <T> to a byte slice
pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] {
let ptr: *const _ = val;
unsafe { slice::from_raw_parts(ptr.cast(), mem::size_of_val(val)) }
unsafe { slice::from_raw_parts(ptr::from_ref(val).cast(), mem::size_of_val(val)) }
}
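
`ptr::from_ref` (stable since Rust 1.76) is the `ref_as_ptr` fix: it pins the pointee type to `T`, so the `.cast()` to bytes is the only reinterpretation left in the function. The same shape as `bytes_of`, sketched without the `Pod`-style bound the real helper relies on:

```rust
use core::{mem, ptr, slice};

// Caller must uphold the usual bytes-of-T safety contract; this sketch
// omits the Pod-style bound for brevity.
unsafe fn view_bytes<T>(val: &T) -> &[u8] {
    unsafe { slice::from_raw_parts(ptr::from_ref(val).cast::<u8>(), mem::size_of_val(val)) }
}
```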

@ -1,3 +1,5 @@
#![expect(unused_crate_dependencies, reason = "used in lib")]
use std::{path::PathBuf, process::exit};
use aya_tool::generate::{InputFile, generate};

@ -1,2 +1,4 @@
#![expect(unused_crate_dependencies, reason = "used in bin")]
pub mod bindgen;
pub mod generate;

@ -1153,7 +1153,7 @@ impl<'a, T: Pod> From<&'a T> for GlobalData<'a> {
fn from(v: &'a T) -> Self {
GlobalData {
// Safety: v is Pod
bytes: unsafe { bytes_of(v) },
bytes: bytes_of(v),
}
}
}

@ -37,42 +37,7 @@
html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(
clippy::all,
clippy::cast_lossless,
clippy::cast_precision_loss,
clippy::char_lit_as_u8,
clippy::fn_to_numeric_cast,
clippy::fn_to_numeric_cast_with_truncation,
clippy::unnecessary_cast,
clippy::use_self,
absolute_paths_not_starting_with_crate,
deprecated_in_future,
elided_lifetimes_in_paths,
explicit_outlives_requirements,
ffi_unwind_calls,
keyword_idents,
//let_underscore_drop,
macro_use_extern_crate,
meta_variable_misuse,
missing_abi,
//missing_copy_implementations,
missing_docs,
non_ascii_idents,
noop_method_call,
single_use_lifetimes,
trivial_numeric_casts,
unreachable_pub,
//unsafe_op_in_unsafe_fn,
unstable_features,
unused_crate_dependencies,
unused_extern_crates,
unused_import_braces,
unused_lifetimes,
unused_macro_rules,
//unused_qualifications, https://github.com/rust-lang/rust/commit/9ccc7b7 added size_of to the prelude, but we need to continue to qualify it so that we build on older compilers.
//unused_results,
)]
#![deny(missing_docs)]
mod bpf;
pub mod maps;

@ -307,13 +307,15 @@ mod tests {
}
fn set_next_key<T: Copy>(attr: &bpf_attr, next: T) -> SysResult {
let key = unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.next_key } as *const T as *mut T;
let key =
(unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.next_key } as *const T).cast_mut();
unsafe { *key = next };
Ok(0)
}
fn set_ret<T: Copy>(attr: &bpf_attr, ret: T) -> SysResult {
let value = unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.value } as *const T as *mut T;
let value =
(unsafe { attr.__bindgen_anon_2.__bindgen_anon_1.value } as *const T).cast_mut();
unsafe { *value = ret };
Ok(0)
}

@ -63,8 +63,9 @@ pub struct LpmTrie<T, K, V> {
/// let ipaddr = Ipv4Addr::new(8,8,8,8);
/// let key = Key::new(16, u32::from(ipaddr).to_be());
/// ```
#[derive(Clone, Copy)]
#[repr(C, packed)]
pub struct Key<K: Pod> {
pub struct Key<K> {
prefix_len: u32,
data: K,
}
@ -101,14 +102,6 @@ impl<K: Pod> Key<K> {
}
}
impl<K: Pod> Copy for Key<K> {}
impl<K: Pod> Clone for Key<K> {
fn clone(&self) -> Self {
*self
}
}
// A Pod impl is required as the Key struct is used as a map key.
unsafe impl<K: Pod> Pod for Key<K> {}
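// Editor's note: a sketch of the derive-bounds trade-off behind this hunk.
// `#[derive(Clone, Copy)]` expands with implicit bounds on the type
// parameter, which is what the removed hand-written impls (bounded on
// `K: Pod`, matching the struct's old bound) spelled out manually; with the
// struct bound dropped, the plain derive suffices.
#[derive(Clone, Copy)]
struct Wrapper<K> {
    data: K,
}
// expands to roughly:
//   impl<K: Copy> Copy for Wrapper<K> {}
//   impl<K: Clone> Clone for Wrapper<K> { fn clone(&self) -> Self { ... } }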

@ -681,7 +681,7 @@ impl MapData {
pub(crate) fn finalize(&mut self) -> Result<(), MapError> {
let Self { obj, fd } = self;
if !obj.data().is_empty() {
bpf_map_update_elem_ptr(fd.as_fd(), &0 as *const _, obj.data_mut().as_mut_ptr(), 0)
bpf_map_update_elem_ptr(fd.as_fd(), &0, obj.data_mut().as_mut_ptr(), 0)
.map_err(|io_error| SyscallError {
call: "bpf_map_update_elem",
io_error,

@ -380,8 +380,9 @@ impl ProducerData {
panic!("{:?} not in {:?}", offset..offset + len, 0..data.len())
})
};
let header_ptr =
must_get_data(offset, mem::size_of::<AtomicU32>()).as_ptr() as *const AtomicU32;
let header_ptr: *const AtomicU32 = must_get_data(offset, mem::size_of::<AtomicU32>())
.as_ptr()
.cast();
// Pair the kernel's SeqCst write (implies Release) [1] with an Acquire load. This
// ensures data written by the producer will be visible.
//
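// Editor's note: a minimal user-space sketch of the Release/Acquire pairing
// the comment above relies on; in the ring buffer the kernel plays the
// producer role and its SeqCst write implies the Release half.
use std::sync::atomic::{AtomicU32, Ordering};

static LEN: AtomicU32 = AtomicU32::new(0);
static DATA: AtomicU32 = AtomicU32::new(0);

fn producer() {
    DATA.store(42, Ordering::Relaxed); // payload write...
    LEN.store(4, Ordering::Release); // ...published by the Release store
}

fn consumer() {
    if LEN.load(Ordering::Acquire) != 0 {
        // The Acquire load pairs with the Release store, so the payload
        // written before it is guaranteed visible here.
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
    }
}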

@ -1,6 +1,6 @@
//! Socket filter programs.
use std::{
io, mem,
io,
os::fd::{AsFd, AsRawFd as _, RawFd},
};
@ -87,8 +87,8 @@ impl SocketFilter {
socket,
SOL_SOCKET,
SO_ATTACH_BPF as i32,
&prog_fd as *const _ as *const _,
mem::size_of::<RawFd>() as u32,
std::ptr::from_ref(&prog_fd).cast(),
std::mem::size_of_val(&prog_fd) as u32,
)
};
if ret < 0 {
@ -144,8 +144,8 @@ impl Link for SocketFilterLink {
self.socket,
SOL_SOCKET,
SO_DETACH_BPF as i32,
&self.prog_fd as *const _ as *const _,
mem::size_of::<RawFd>() as u32,
std::ptr::from_ref(&self.prog_fd).cast(),
std::mem::size_of_val(&self.prog_fd) as u32,
);
}
Ok(())

@ -1,7 +1,7 @@
//! User space probes.
use std::{
error::Error,
ffi::{CStr, OsStr, OsString, c_char},
ffi::{CStr, OsStr, OsString},
fs,
io::{self, BufRead as _, Cursor, Read as _},
mem,
@ -549,12 +549,8 @@ impl LdSoCache {
let read_str = |pos| {
use std::os::unix::ffi::OsStrExt as _;
OsStr::from_bytes(
unsafe {
CStr::from_ptr(
cursor.get_ref()[offset + pos..].as_ptr() as *const c_char
)
}
.to_bytes(),
unsafe { CStr::from_ptr(cursor.get_ref()[offset + pos..].as_ptr().cast()) }
.to_bytes(),
)
.to_owned()
};

@ -4,6 +4,7 @@ use std::{
io, iter,
mem::{self, MaybeUninit},
os::fd::{AsFd as _, AsRawFd as _, BorrowedFd, FromRawFd as _, RawFd},
ptr,
};
use assert_matches::assert_matches;
@ -172,12 +173,12 @@ pub(crate) fn bpf_load_program(
if let Some(btf_fd) = aya_attr.prog_btf_fd {
u.prog_btf_fd = btf_fd.as_raw_fd() as u32;
if aya_attr.line_info_rec_size > 0 {
u.line_info = line_info_buf.as_ptr() as *const _ as u64;
u.line_info = line_info_buf.as_ptr() as u64;
u.line_info_cnt = aya_attr.line_info.len() as u32;
u.line_info_rec_size = aya_attr.line_info_rec_size as u32;
}
if aya_attr.func_info_rec_size > 0 {
u.func_info = func_info_buf.as_ptr() as *const _ as u64;
u.func_info = func_info_buf.as_ptr() as u64;
u.func_info_cnt = aya_attr.func_info.len() as u32;
u.func_info_rec_size = aya_attr.func_info_rec_size as u32;
}
@ -212,9 +213,9 @@ fn lookup<K: Pod, V: Pod>(
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
u.key = ptr::from_ref(key) as u64;
}
u.__bindgen_anon_1.value = &mut value as *mut _ as u64;
u.__bindgen_anon_1.value = ptr::from_mut(&mut value) as u64;
u.flags = flags;
match unit_sys_bpf(cmd, &mut attr) {
@ -264,7 +265,7 @@ pub(crate) fn bpf_map_lookup_elem_ptr<K: Pod, V>(
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
u.key = ptr::from_ref(key) as u64;
}
u.__bindgen_anon_1.value = value as u64;
u.flags = flags;
@ -287,9 +288,9 @@ pub(crate) fn bpf_map_update_elem<K: Pod, V: Pod>(
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
u.key = ptr::from_ref(key) as u64;
}
u.__bindgen_anon_1.value = value as *const _ as u64;
u.__bindgen_anon_1.value = ptr::from_ref(value) as u64;
u.flags = flags;
unit_sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &mut attr)
@ -304,7 +305,7 @@ pub(crate) fn bpf_map_push_elem<V: Pod>(
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
u.__bindgen_anon_1.value = value as *const _ as u64;
u.__bindgen_anon_1.value = ptr::from_ref(value) as u64;
u.flags = flags;
unit_sys_bpf(bpf_cmd::BPF_MAP_UPDATE_ELEM, &mut attr)
@ -342,7 +343,7 @@ pub(crate) fn bpf_map_delete_elem<K: Pod>(fd: BorrowedFd<'_>, key: &K) -> io::Re
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
u.key = key as *const _ as u64;
u.key = ptr::from_ref(key) as u64;
unit_sys_bpf(bpf_cmd::BPF_MAP_DELETE_ELEM, &mut attr)
}
@ -357,9 +358,9 @@ pub(crate) fn bpf_map_get_next_key<K: Pod>(
let u = unsafe { &mut attr.__bindgen_anon_2 };
u.map_fd = fd.as_raw_fd() as u32;
if let Some(key) = key {
u.key = key as *const _ as u64;
u.key = ptr::from_ref(key) as u64;
}
u.__bindgen_anon_1.next_key = &mut next_key as *mut _ as u64;
u.__bindgen_anon_1.next_key = ptr::from_mut(&mut next_key) as u64;
match unit_sys_bpf(bpf_cmd::BPF_MAP_GET_NEXT_KEY, &mut attr) {
Ok(()) => Ok(Some(unsafe { next_key.assume_init() })),
@ -573,7 +574,7 @@ fn bpf_obj_get_info_by_fd<T, F: FnOnce(&mut T)>(
init(&mut info);
attr.info.bpf_fd = fd.as_raw_fd() as u32;
attr.info.info = &info as *const _ as u64;
attr.info.info = ptr::from_ref(&info) as u64;
attr.info.info_len = mem::size_of_val(&info) as u32;
match unit_sys_bpf(bpf_cmd::BPF_OBJ_GET_INFO_BY_FD, &mut attr) {
@ -669,7 +670,7 @@ pub(crate) fn bpf_load_btf(
) -> io::Result<crate::MockableFd> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_7 };
u.btf = raw_btf.as_ptr() as *const _ as u64;
u.btf = raw_btf.as_ptr() as u64;
u.btf_size = mem::size_of_val(raw_btf) as u32;
if !log_buf.is_empty() {
u.btf_log_level = verifier_log_level.bits();
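// Editor's note: a sketch of the cast pattern adopted throughout this file.
// `ptr::from_ref(x) as u64` keeps a single explicit pointer-to-integer step,
// versus the old `x as *const _ as u64` double cast; bpf_attr stores user
// pointers in u64 fields regardless of the platform's pointer width.
fn to_attr_field<T>(x: &T) -> u64 {
    std::ptr::from_ref(x) as u64
}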

@ -1,6 +1,6 @@
//! Probes and identifies available eBPF features supported by the host kernel.
use std::{mem, os::fd::AsRawFd as _};
use std::{mem, os::fd::AsRawFd as _, ptr};
use aya_obj::{
btf::{Btf, BtfKind},
@ -395,7 +395,7 @@ fn probe_bpf_info<T>(fd: MockableFd, info: T) -> Result<bool, SyscallError> {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
attr.info.bpf_fd = fd.as_raw_fd() as u32;
attr.info.info_len = mem::size_of_val(&info) as u32;
attr.info.info = &info as *const _ as u64;
attr.info.info = ptr::from_ref(&info) as u64;
let io_error = match unit_sys_bpf(bpf_cmd::BPF_OBJ_GET_INFO_BY_FD, &mut attr) {
Ok(()) => return Ok(true),

@ -20,7 +20,11 @@ use libc::{
};
use thiserror::Error;
use crate::{programs::TcAttachType, util::tc_handler_make};
use crate::{
Pod,
programs::TcAttachType,
util::{bytes_of, tc_handler_make},
};
const NLA_HDR_LEN: usize = align_to(mem::size_of::<nlattr>(), NLA_ALIGNTO as usize);
@ -188,7 +192,7 @@ pub(crate) unsafe fn netlink_qdisc_attach(
// add TCA_OPTIONS which includes TCA_BPF_FD, TCA_BPF_NAME and TCA_BPF_FLAGS
let mut options = NestedAttrs::new(&mut attrs_buf[kind_len..], TCA_OPTIONS as u16);
options
.write_attr(TCA_BPF_FD as u16, prog_fd)
.write_attr(TCA_BPF_FD as u16, prog_fd.as_raw_fd())
.map_err(|e| NetlinkError(NetlinkErrorInternal::IoError(e)))?;
options
.write_attr_bytes(TCA_BPF_NAME as u16, prog_name.to_bytes_with_nul())
@ -336,6 +340,7 @@ pub unsafe fn netlink_set_link_up(if_index: i32) -> Result<(), NetlinkError> {
Ok(())
}
#[derive(Copy, Clone)]
#[repr(C)]
struct Request {
header: nlmsghdr,
@ -343,6 +348,9 @@ struct Request {
attrs: [u8; 64],
}
unsafe impl Pod for Request {}
#[derive(Copy, Clone)]
#[repr(C)]
struct TcRequest {
header: nlmsghdr,
@ -350,6 +358,8 @@ struct TcRequest {
attrs: [u8; 64],
}
unsafe impl Pod for TcRequest {}
struct NetlinkSocket {
sock: crate::MockableFd,
_nl_pid: u32,
@ -373,8 +383,8 @@ impl NetlinkSocket {
sock.as_raw_fd(),
SOL_NETLINK,
NETLINK_EXT_ACK,
&enable as *const _ as *const _,
mem::size_of::<i32>() as u32,
std::ptr::from_ref(&enable).cast(),
mem::size_of_val(&enable) as u32,
) < 0
{
return Err(NetlinkErrorInternal::IoError(io::Error::last_os_error()));
@ -385,8 +395,8 @@ impl NetlinkSocket {
sock.as_raw_fd(),
SOL_NETLINK,
NETLINK_CAP_ACK,
&enable as *const _ as *const _,
mem::size_of::<i32>() as u32,
std::ptr::from_ref(&enable).cast(),
mem::size_of_val(&enable) as u32,
) < 0
{
return Err(NetlinkErrorInternal::IoError(io::Error::last_os_error()));
@ -401,8 +411,8 @@ impl NetlinkSocket {
if unsafe {
getsockname(
sock.as_raw_fd(),
&mut addr as *mut _ as *mut _,
&mut addr_len as *mut _,
std::ptr::from_mut(&mut addr).cast(),
std::ptr::from_mut(&mut addr_len).cast(),
)
} < 0
{
@ -416,15 +426,7 @@ impl NetlinkSocket {
}
fn send(&self, msg: &[u8]) -> Result<(), NetlinkErrorInternal> {
if unsafe {
send(
self.sock.as_raw_fd(),
msg.as_ptr() as *const _,
msg.len(),
0,
)
} < 0
{
if unsafe { send(self.sock.as_raw_fd(), msg.as_ptr().cast(), msg.len(), 0) } < 0 {
return Err(NetlinkErrorInternal::IoError(io::Error::last_os_error()));
}
Ok(())
@ -437,14 +439,7 @@ impl NetlinkSocket {
'out: while multipart {
multipart = false;
// Safety: libc wrapper
let len = unsafe {
recv(
self.sock.as_raw_fd(),
buf.as_mut_ptr() as *mut _,
buf.len(),
0,
)
};
let len = unsafe { recv(self.sock.as_raw_fd(), buf.as_mut_ptr().cast(), buf.len(), 0) };
if len < 0 {
return Err(NetlinkErrorInternal::IoError(io::Error::last_os_error()));
}
@ -505,7 +500,7 @@ impl NetlinkMessage {
}
// Safety: nlmsghdr is POD so read is safe
let header = unsafe { ptr::read_unaligned(buf.as_ptr() as *const nlmsghdr) };
let header: nlmsghdr = unsafe { ptr::read_unaligned(buf.as_ptr().cast()) };
let msg_len = header.nlmsg_len as usize;
if msg_len < mem::size_of::<nlmsghdr>() || msg_len > buf.len() {
return Err(io::Error::other("invalid nlmsg_len"));
@ -525,9 +520,7 @@ impl NetlinkMessage {
(
&buf[data_offset + mem::size_of::<nlmsgerr>()..msg_len],
// Safety: nlmsgerr is POD so read is safe
Some(unsafe {
ptr::read_unaligned(buf[data_offset..].as_ptr() as *const nlmsgerr)
}),
Some(unsafe { ptr::read_unaligned(buf[data_offset..].as_ptr().cast()) }),
)
} else {
(&buf[data_offset..msg_len], None)
@ -564,7 +557,7 @@ impl<'a> NestedAttrs<'a> {
}
}
fn write_attr<T>(&mut self, attr_type: u16, value: T) -> Result<usize, io::Error> {
fn write_attr<T: Pod>(&mut self, attr_type: u16, value: T) -> Result<usize, io::Error> {
let size = write_attr(self.buf, self.offset, attr_type, value)?;
self.offset += size;
Ok(size)
@ -588,14 +581,13 @@ impl<'a> NestedAttrs<'a> {
}
}
fn write_attr<T>(
fn write_attr<T: Pod>(
buf: &mut [u8],
offset: usize,
attr_type: u16,
value: T,
) -> Result<usize, io::Error> {
let value =
unsafe { slice::from_raw_parts(&value as *const _ as *const _, mem::size_of::<T>()) };
let value = bytes_of(&value);
write_attr_bytes(buf, offset, attr_type, value)
}
@ -616,10 +608,10 @@ fn write_attr_bytes(
Ok(NLA_HDR_LEN + value_len)
}
fn write_attr_header(buf: &mut [u8], offset: usize, attr: nlattr) -> Result<usize, io::Error> {
let attr =
unsafe { slice::from_raw_parts(&attr as *const _ as *const _, mem::size_of::<nlattr>()) };
unsafe impl Pod for nlattr {}
fn write_attr_header(buf: &mut [u8], offset: usize, attr: nlattr) -> Result<usize, io::Error> {
let attr = bytes_of(&attr);
write_bytes(buf, offset, attr)?;
Ok(NLA_HDR_LEN)
}
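// Editor's note: a sketch of the refactor above. Declaring `nlattr: Pod`
// lets the shared `bytes_of` helper replace the open-coded
// `slice::from_raw_parts(&attr as *const _ as *const _, ...)`, so the unsafe
// contract lives in one audited `Pod` impl instead of at every call site.
fn attr_bytes(attr: &nlattr) -> &[u8] {
    bytes_of(attr) // soundness hangs off `unsafe impl Pod for nlattr`
}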
@ -663,7 +655,7 @@ impl<'a> Iterator for NlAttrsIterator<'a> {
}));
}
let attr = unsafe { ptr::read_unaligned(buf.as_ptr() as *const nlattr) };
let attr: nlattr = unsafe { ptr::read_unaligned(buf.as_ptr().cast()) };
let len = attr.nla_len as usize;
let align_len = align_to(len, NLA_ALIGNTO as usize);
if len < NLA_HDR_LEN {
@ -723,11 +715,6 @@ unsafe fn request_attributes<T>(req: &mut T, msg_len: usize) -> &mut [u8] {
unsafe { slice::from_raw_parts_mut(attrs_addr, len) }
}
fn bytes_of<T>(val: &T) -> &[u8] {
let size = mem::size_of::<T>();
unsafe { slice::from_raw_parts(slice::from_ref(val).as_ptr().cast(), size) }
}
#[cfg(test)]
mod tests {
use std::ffi::CString;
@ -751,28 +738,32 @@ mod tests {
assert_eq!(len, nla_len);
// read IFLA_XDP
let attr = unsafe { ptr::read_unaligned(buf.as_ptr() as *const nlattr) };
let attr: nlattr = unsafe { ptr::read_unaligned(buf.as_ptr().cast()) };
assert_eq!(attr.nla_type, NLA_F_NESTED as u16 | IFLA_XDP);
assert_eq!(attr.nla_len, nla_len);
// read IFLA_XDP_FD + fd
let attr = unsafe { ptr::read_unaligned(buf[NLA_HDR_LEN..].as_ptr() as *const nlattr) };
let attr: nlattr = unsafe { ptr::read_unaligned(buf[NLA_HDR_LEN..].as_ptr().cast()) };
assert_eq!(attr.nla_type, IFLA_XDP_FD as u16);
assert_eq!(attr.nla_len, (NLA_HDR_LEN + mem::size_of::<u32>()) as u16);
let fd = unsafe { ptr::read_unaligned(buf[NLA_HDR_LEN * 2..].as_ptr() as *const u32) };
let fd: u32 = unsafe { ptr::read_unaligned(buf[NLA_HDR_LEN * 2..].as_ptr().cast()) };
assert_eq!(fd, 42);
// read IFLA_XDP_EXPECTED_FD + fd
let attr = unsafe {
let attr: nlattr = unsafe {
ptr::read_unaligned(
buf[NLA_HDR_LEN * 2 + mem::size_of::<u32>()..].as_ptr() as *const nlattr
buf[NLA_HDR_LEN * 2 + mem::size_of::<u32>()..]
.as_ptr()
.cast(),
)
};
assert_eq!(attr.nla_type, IFLA_XDP_EXPECTED_FD as u16);
assert_eq!(attr.nla_len, (NLA_HDR_LEN + mem::size_of::<u32>()) as u16);
let fd = unsafe {
let fd: u32 = unsafe {
ptr::read_unaligned(
buf[NLA_HDR_LEN * 3 + mem::size_of::<u32>()..].as_ptr() as *const u32
buf[NLA_HDR_LEN * 3 + mem::size_of::<u32>()..]
.as_ptr()
.cast(),
)
};
assert_eq!(fd, 24);

@ -416,9 +416,8 @@ pub(crate) fn page_size() -> usize {
}
// bytes_of converts a `T` to a byte slice
pub(crate) unsafe fn bytes_of<T: Pod>(val: &T) -> &[u8] {
let ptr: *const _ = val;
unsafe { slice::from_raw_parts(ptr.cast(), mem::size_of_val(val)) }
pub(crate) fn bytes_of<T: Pod>(val: &T) -> &[u8] {
unsafe { slice::from_raw_parts(std::ptr::from_ref(val).cast(), mem::size_of_val(val)) }
}
pub(crate) fn bytes_of_slice<T: Pod>(val: &[T]) -> &[u8] {
@ -436,7 +435,7 @@ pub(crate) fn bytes_of_bpf_name(bpf_name: &[core::ffi::c_char; 16]) -> &[u8] {
.rposition(|ch| *ch != 0)
.map(|pos| pos + 1)
.unwrap_or(0);
unsafe { slice::from_raw_parts(bpf_name.as_ptr() as *const _, length) }
unsafe { slice::from_raw_parts(bpf_name.as_ptr().cast(), length) }
}
// MMap corresponds to a memory-mapped region.

@ -1,8 +1,13 @@
#![expect(
clippy::all,
clippy::cast_lossless,
clippy::ptr_as_ptr,
clippy::ref_as_ptr,
clippy::use_self,
non_camel_case_types,
non_snake_case,
non_upper_case_globals,
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
#![no_std]

@ -28,14 +28,13 @@ fn main() {
}
println!("))");
println!("cargo::rustc-check-cfg=cfg(generic_const_exprs,unstable)");
println!("cargo::rustc-check-cfg=cfg(generic_const_exprs)");
}
#[rustversion::nightly]
fn check_rust_version() {
// TODO(https://github.com/rust-lang/rust/issues/141492): restore this.
// println!("cargo:rustc-cfg=generic_const_exprs");
println!("cargo:rustc-cfg=unstable");
}
#[rustversion::not(nightly)]

@ -50,6 +50,7 @@ unsafe impl<T> FromBtfArgument for *const T {
macro_rules! unsafe_impl_from_btf_argument {
($type:ident) => {
unsafe impl FromBtfArgument for $type {
#[allow(trivial_numeric_casts)]
unsafe fn from_argument(ctx: *const c_void, n: usize) -> Self {
// BTF arguments are exposed as an array of `usize` where `usize` can
// either be treated as a pointer or a primitive type
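// Editor's note (hedged sketch): the macro body elided by this diff reads
// slot `n` of that usize array and casts it to the requested primitive,
// roughly:
//   let args = ctx as *const usize;
//   unsafe { *args.add(n) as $type }
// which is why `trivial_numeric_casts` must be allowed when `$type` is
// itself `usize`.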
@ -78,7 +79,7 @@ pub struct PtRegs {
/// A portable wrapper around pt_regs, user_pt_regs and user_regs_struct.
impl PtRegs {
pub fn new(regs: *mut pt_regs) -> Self {
PtRegs { regs }
Self { regs }
}
/// Returns the value of the register used to pass arg `n`.
@ -485,7 +486,7 @@ impl RawTracepointArgs {
/// `bpf_raw_tracepoint_args` raw pointer to allow easier access
/// to raw tracepoint arguments.
pub fn new(args: *mut bpf_raw_tracepoint_args) -> Self {
RawTracepointArgs { args }
Self { args }
}
/// Returns the n-th argument of the raw tracepoint.
@ -565,6 +566,7 @@ unsafe impl<T> FromRawTracepointArgs for *const T {
macro_rules! unsafe_impl_from_raw_tracepoint_args {
($type:ident) => {
unsafe impl FromRawTracepointArgs for $type {
#[allow(trivial_numeric_casts)]
unsafe fn from_argument(ctx: &bpf_raw_tracepoint_args, n: usize) -> Self {
(unsafe { ctx.args.as_slice(n + 1) })[n] as _
}

@ -26,7 +26,7 @@ impl<T, const M: usize, const F: usize> Array<T, M, F> {
reason = "BPF maps are always used as static variables, therefore this method has to be `const`. `Default::default` is not `const`."
)]
pub const fn new() -> Self {
Array(UnsafeCell::new(ArrayDef::new()))
Self(UnsafeCell::new(ArrayDef::new()))
}
#[inline(always)]
@ -36,7 +36,7 @@ impl<T, const M: usize, const F: usize> Array<T, M, F> {
#[inline(always)]
pub fn get_ptr(&self, index: u32) -> Option<*const T> {
unsafe { self.lookup(index).map(|p| p.as_ptr() as *const T) }
unsafe { self.lookup(index).map(|p| p.as_ptr().cast_const()) }
}
#[inline(always)]

@ -21,7 +21,7 @@ pub use generated::*;
use crate::{
check_bounds_signed,
cty::{c_char, c_long, c_void},
cty::{c_char, c_long},
};
/// Read bytes stored at `src` and store them as a `T`.
@ -53,9 +53,9 @@ use crate::{
pub unsafe fn bpf_probe_read<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();
let ret = generated::bpf_probe_read(
v.as_mut_ptr() as *mut c_void,
v.as_mut_ptr().cast(),
mem::size_of::<T>() as u32,
src as *const c_void,
src.cast(),
);
if ret == 0 {
Ok(v.assume_init())
@ -88,11 +88,7 @@ pub unsafe fn bpf_probe_read<T>(src: *const T) -> Result<T, c_long> {
/// On failure, this function returns a negative value wrapped in an `Err`.
#[inline]
pub unsafe fn bpf_probe_read_buf(src: *const u8, dst: &mut [u8]) -> Result<(), c_long> {
let ret = generated::bpf_probe_read(
dst.as_mut_ptr() as *mut c_void,
dst.len() as u32,
src as *const c_void,
);
let ret = generated::bpf_probe_read(dst.as_mut_ptr().cast(), dst.len() as u32, src.cast());
if ret == 0 { Ok(()) } else { Err(ret) }
}
@ -122,9 +118,9 @@ pub unsafe fn bpf_probe_read_buf(src: *const u8, dst: &mut [u8]) -> Result<(), c
pub unsafe fn bpf_probe_read_user<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();
let ret = generated::bpf_probe_read_user(
v.as_mut_ptr() as *mut c_void,
v.as_mut_ptr().cast(),
mem::size_of::<T>() as u32,
src as *const c_void,
src.cast(),
);
if ret == 0 {
Ok(v.assume_init())
@ -155,11 +151,7 @@ pub unsafe fn bpf_probe_read_user<T>(src: *const T) -> Result<T, c_long> {
/// On failure, this function returns a negative value wrapped in an `Err`.
#[inline]
pub unsafe fn bpf_probe_read_user_buf(src: *const u8, dst: &mut [u8]) -> Result<(), c_long> {
let ret = generated::bpf_probe_read_user(
dst.as_mut_ptr() as *mut c_void,
dst.len() as u32,
src as *const c_void,
);
let ret = generated::bpf_probe_read_user(dst.as_mut_ptr().cast(), dst.len() as u32, src.cast());
if ret == 0 { Ok(()) } else { Err(ret) }
}
@ -189,9 +181,9 @@ pub unsafe fn bpf_probe_read_user_buf(src: *const u8, dst: &mut [u8]) -> Result<
pub unsafe fn bpf_probe_read_kernel<T>(src: *const T) -> Result<T, c_long> {
let mut v: MaybeUninit<T> = MaybeUninit::uninit();
let ret = generated::bpf_probe_read_kernel(
v.as_mut_ptr() as *mut c_void,
v.as_mut_ptr().cast(),
mem::size_of::<T>() as u32,
src as *const c_void,
src.cast(),
);
if ret == 0 {
Ok(v.assume_init())
@ -222,11 +214,8 @@ pub unsafe fn bpf_probe_read_kernel<T>(src: *const T) -> Result<T, c_long> {
/// On failure, this function returns a negative value wrapped in an `Err`.
#[inline]
pub unsafe fn bpf_probe_read_kernel_buf(src: *const u8, dst: &mut [u8]) -> Result<(), c_long> {
let ret = generated::bpf_probe_read_kernel(
dst.as_mut_ptr() as *mut c_void,
dst.len() as u32,
src as *const c_void,
);
let ret =
generated::bpf_probe_read_kernel(dst.as_mut_ptr().cast(), dst.len() as u32, src.cast());
if ret == 0 { Ok(()) } else { Err(ret) }
}
@ -261,11 +250,8 @@ pub unsafe fn bpf_probe_read_kernel_buf(src: *const u8, dst: &mut [u8]) -> Resul
)]
#[inline]
pub unsafe fn bpf_probe_read_str(src: *const u8, dest: &mut [u8]) -> Result<usize, c_long> {
let len = generated::bpf_probe_read_str(
dest.as_mut_ptr() as *mut c_void,
dest.len() as u32,
src as *const c_void,
);
let len =
generated::bpf_probe_read_str(dest.as_mut_ptr().cast(), dest.len() as u32, src.cast());
let len = usize::try_from(len).map_err(|core::num::TryFromIntError { .. }| -1)?;
// this can never happen; it's needed to tell the verifier that len is bounded.
Ok(len.min(dest.len()))
@ -297,11 +283,8 @@ pub unsafe fn bpf_probe_read_str(src: *const u8, dest: &mut [u8]) -> Result<usiz
#[deprecated(note = "Use `bpf_probe_read_user_str_bytes` instead")]
#[inline]
pub unsafe fn bpf_probe_read_user_str(src: *const u8, dest: &mut [u8]) -> Result<usize, c_long> {
let len = generated::bpf_probe_read_user_str(
dest.as_mut_ptr() as *mut c_void,
dest.len() as u32,
src as *const c_void,
);
let len =
generated::bpf_probe_read_user_str(dest.as_mut_ptr().cast(), dest.len() as u32, src.cast());
let len = usize::try_from(len).map_err(|core::num::TryFromIntError { .. }| -1)?;
// this can never happen; it's needed to tell the verifier that len is bounded.
Ok(len.min(dest.len()))
@ -394,11 +377,8 @@ pub unsafe fn bpf_probe_read_user_str_bytes(
src: *const u8,
dest: &mut [u8],
) -> Result<&[u8], c_long> {
let len = generated::bpf_probe_read_user_str(
dest.as_mut_ptr() as *mut c_void,
dest.len() as u32,
src as *const c_void,
);
let len =
generated::bpf_probe_read_user_str(dest.as_mut_ptr().cast(), dest.len() as u32, src.cast());
read_str_bytes(len, dest)
}
@ -447,9 +427,9 @@ fn read_str_bytes(len: i64, dest: &[u8]) -> Result<&[u8], c_long> {
#[inline]
pub unsafe fn bpf_probe_read_kernel_str(src: *const u8, dest: &mut [u8]) -> Result<usize, c_long> {
let len = generated::bpf_probe_read_kernel_str(
dest.as_mut_ptr() as *mut c_void,
dest.as_mut_ptr().cast(),
dest.len() as u32,
src as *const c_void,
src.cast(),
);
let len = usize::try_from(len).map_err(|core::num::TryFromIntError { .. }| -1)?;
// this can never happen; it's needed to tell the verifier that len is bounded.
@ -544,9 +524,9 @@ pub unsafe fn bpf_probe_read_kernel_str_bytes(
dest: &mut [u8],
) -> Result<&[u8], c_long> {
let len = generated::bpf_probe_read_kernel_str(
dest.as_mut_ptr() as *mut c_void,
dest.as_mut_ptr().cast(),
dest.len() as u32,
src as *const c_void,
src.cast(),
);
read_str_bytes(len, dest)
@ -578,11 +558,7 @@ pub unsafe fn bpf_probe_read_kernel_str_bytes(
/// On failure, this function returns a negative value wrapped in an `Err`.
#[inline]
pub unsafe fn bpf_probe_write_user<T>(dst: *mut T, src: *const T) -> Result<(), c_long> {
let ret = generated::bpf_probe_write_user(
dst as *mut c_void,
src as *const c_void,
mem::size_of::<T>() as u32,
);
let ret = generated::bpf_probe_write_user(dst.cast(), src.cast(), mem::size_of::<T>() as u32);
if ret == 0 { Ok(()) } else { Err(ret) }
}
@ -605,7 +581,9 @@ pub unsafe fn bpf_probe_write_user<T>(dst: *mut T, src: *const T) -> Result<(),
#[inline]
pub fn bpf_get_current_comm() -> Result<[u8; 16], c_long> {
let mut comm: [u8; 16usize] = [0; 16];
let ret = unsafe { generated::bpf_get_current_comm(&mut comm as *mut _ as *mut c_void, 16u32) };
let ret = unsafe {
generated::bpf_get_current_comm(comm.as_mut_ptr().cast(), mem::size_of_val(&comm) as u32)
};
if ret == 0 { Ok(comm) } else { Err(ret) }
}
@ -732,8 +710,9 @@ macro_rules! impl_integer_promotion {
/// Create `printk` arguments from integer types.
impl From<$ty> for PrintkArg {
#[inline]
fn from(x: $ty) -> PrintkArg {
PrintkArg((x as $via).to_ne_bytes())
#[allow(trivial_numeric_casts)]
fn from(x: $ty) -> Self {
Self((x as $via).to_ne_bytes())
}
}
)*}
@ -757,7 +736,7 @@ impl_integer_promotion!(
impl<T> From<*const T> for PrintkArg {
#[inline]
fn from(x: *const T) -> Self {
PrintkArg((x as usize).to_ne_bytes())
Self((x as usize).to_ne_bytes())
}
}
@ -765,7 +744,7 @@ impl<T> From<*const T> for PrintkArg {
impl<T> From<*mut T> for PrintkArg {
#[inline]
fn from(x: *mut T) -> Self {
PrintkArg((x as usize).to_ne_bytes())
Self((x as usize).to_ne_bytes())
}
}
@ -808,7 +787,7 @@ pub unsafe fn bpf_printk_impl<const FMT_LEN: usize, const NUM_ARGS: usize>(
let printk: unsafe extern "C" fn(fmt: *const c_char, fmt_size: u32, ...) -> c_long =
mem::transmute(6usize);
let fmt_ptr = fmt.as_ptr() as *const c_char;
let fmt_ptr = fmt.as_ptr().cast();
let fmt_size = fmt.len() as u32;
match NUM_ARGS {
@ -816,9 +795,12 @@ pub unsafe fn bpf_printk_impl<const FMT_LEN: usize, const NUM_ARGS: usize>(
1 => printk(fmt_ptr, fmt_size, args[0]),
2 => printk(fmt_ptr, fmt_size, args[0], args[1]),
3 => printk(fmt_ptr, fmt_size, args[0], args[1], args[2]),
_ => {
generated::bpf_trace_vprintk(fmt_ptr, fmt_size, args.as_ptr() as _, (NUM_ARGS * 8) as _)
}
_ => generated::bpf_trace_vprintk(
fmt_ptr,
fmt_size,
args.as_ptr().cast(),
(NUM_ARGS * 8) as _,
),
}
}
@ -839,6 +821,5 @@ pub fn bpf_strncmp<const N: usize>(s1: &[u8; N], s2: &CStr) -> Ordering {
//
// NB: s1's size must be known at compile time to appease the verifier. This is also the typical
// usage of strncmp in C programs.
unsafe { generated::bpf_strncmp(s1.as_ptr() as *const _, N as u32, s2.as_ptr() as *const _) }
.cmp(&0)
unsafe { generated::bpf_strncmp(s1.as_ptr().cast(), N as u32, s2.as_ptr().cast()) }.cmp(&0)
}
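// Editor's note: a program-side usage sketch (hypothetical, assuming the
// running kernel provides the bpf_strncmp helper): comparing the current
// task's comm against a fixed needle.
fn comm_is_curl() -> bool {
    let comm = bpf_get_current_comm().unwrap_or_default();
    bpf_strncmp(&comm, c"curl") == core::cmp::Ordering::Equal
}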

@ -14,7 +14,6 @@
expect(incomplete_features),
feature(generic_const_exprs)
)]
#![cfg_attr(unstable, feature(never_type))]
#![cfg_attr(target_arch = "bpf", feature(asm_experimental_arch))]
#![warn(clippy::cast_lossless, clippy::cast_sign_loss)]
#![no_std]
@ -71,7 +70,7 @@ mod intrinsics {
use super::cty::c_int;
#[unsafe(no_mangle)]
pub unsafe extern "C" fn memset(s: *mut u8, c: c_int, n: usize) {
unsafe extern "C" fn memset(s: *mut u8, c: c_int, n: usize) {
#[expect(clippy::cast_sign_loss)]
let b = c as u8;
for i in 0..n {
@ -80,12 +79,12 @@ mod intrinsics {
}
#[unsafe(no_mangle)]
pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *mut u8, n: usize) {
unsafe extern "C" fn memcpy(dest: *mut u8, src: *mut u8, n: usize) {
unsafe { copy_forward(dest, src, n) }
}
#[unsafe(no_mangle)]
pub unsafe extern "C" fn memmove(dest: *mut u8, src: *mut u8, n: usize) {
unsafe extern "C" fn memmove(dest: *mut u8, src: *mut u8, n: usize) {
let delta = (dest as usize).wrapping_sub(src as usize);
if delta >= n {
// We can copy forwards because either dest is far enough ahead of src,
@ -111,9 +110,6 @@ mod intrinsics {
}
}
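// Editor's note: a sketch of the overlap check above. Reading the wrapped
// difference as unsigned folds two cases into one comparison:
// `dest.wrapping_sub(src) >= n` holds when `dest >= src + n` (regions are
// disjoint) and also when `dest < src` (the subtraction wraps to a huge
// value), and in both cases a forward copy reads each byte before it could
// be overwritten.
fn forward_copy_ok(dest: usize, src: usize, n: usize) -> bool {
    dest.wrapping_sub(src) >= n
}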
#[cfg(target_arch = "bpf")]
pub use intrinsics::*;
/// Check if a value is within a range, using conditional forms compatible with
/// the verifier.
#[inline(always)]

@ -17,8 +17,8 @@ pub struct Array<T> {
unsafe impl<T: Sync> Sync for Array<T> {}
impl<T> Array<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Array<T> {
Array {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -32,8 +32,8 @@ impl<T> Array<T> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Array<T> {
Array {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -54,7 +54,7 @@ impl<T> Array<T> {
#[inline(always)]
pub fn get_ptr(&self, index: u32) -> Option<*const T> {
unsafe { self.lookup(index).map(|p| p.as_ptr() as *const T) }
unsafe { self.lookup(index).map(|p| p.as_ptr().cast_const()) }
}
#[inline(always)]

@ -1,6 +1,4 @@
use core::{marker::PhantomData, mem};
use aya_ebpf_cty::c_void;
use core::{marker::PhantomData, mem, ptr};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_BLOOM_FILTER},
@ -15,8 +13,8 @@ pub struct BloomFilter<T> {
}
impl<T> BloomFilter<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> BloomFilter<T> {
BloomFilter {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: build_def::<T>(
BPF_MAP_TYPE_BLOOM_FILTER,
max_entries,
@ -27,8 +25,8 @@ impl<T> BloomFilter<T> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> BloomFilter<T> {
BloomFilter {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: build_def::<T>(
BPF_MAP_TYPE_BLOOM_FILTER,
max_entries,
@ -43,8 +41,8 @@ impl<T> BloomFilter<T> {
pub fn contains(&mut self, value: &T) -> Result<(), i64> {
let ret = unsafe {
bpf_map_peek_elem(
&mut self.def as *mut _ as *mut _,
value as *const _ as *mut c_void,
ptr::from_ref(&self.def).cast_mut().cast(),
ptr::from_ref(value).cast_mut().cast(),
)
};
(ret == 0).then_some(()).ok_or(ret)
@ -54,8 +52,8 @@ impl<T> BloomFilter<T> {
pub fn insert(&mut self, value: &T, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_map_push_elem(
&mut self.def as *mut _ as *mut _,
value as *const _ as *const _,
ptr::from_ref(&self.def).cast_mut().cast(),
ptr::from_ref(value).cast(),
flags,
)
};

@ -22,8 +22,8 @@ pub struct HashMap<K, V> {
unsafe impl<K: Sync, V: Sync> Sync for HashMap<K, V> {}
impl<K, V> HashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> HashMap<K, V> {
HashMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
@ -35,8 +35,8 @@ impl<K, V> HashMap<K, V> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> HashMap<K, V> {
HashMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_HASH,
max_entries,
@ -98,8 +98,8 @@ pub struct LruHashMap<K, V> {
unsafe impl<K: Sync, V: Sync> Sync for LruHashMap<K, V> {}
impl<K, V> LruHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
LruHashMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
@ -111,8 +111,8 @@ impl<K, V> LruHashMap<K, V> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> LruHashMap<K, V> {
LruHashMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_HASH,
max_entries,
@ -174,8 +174,8 @@ pub struct PerCpuHashMap<K, V> {
unsafe impl<K, V> Sync for PerCpuHashMap<K, V> {}
impl<K, V> PerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
PerCpuHashMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
@ -187,8 +187,8 @@ impl<K, V> PerCpuHashMap<K, V> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuHashMap<K, V> {
PerCpuHashMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_PERCPU_HASH,
max_entries,
@ -250,8 +250,8 @@ pub struct LruPerCpuHashMap<K, V> {
unsafe impl<K, V> Sync for LruPerCpuHashMap<K, V> {}
impl<K, V> LruPerCpuHashMap<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
LruPerCpuHashMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
@ -263,8 +263,8 @@ impl<K, V> LruPerCpuHashMap<K, V> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> LruPerCpuHashMap<K, V> {
LruPerCpuHashMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LRU_PERCPU_HASH,
max_entries,
@ -329,16 +329,16 @@ const fn build_def<K, V>(ty: u32, max_entries: u32, flags: u32, pin: PinningType
}
#[inline]
fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
lookup(def.cast(), key).map(|p| p.as_ptr())
unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
get_ptr(def, key).map(|p| unsafe { &*p })
}
#[inline]
fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
get_ptr_mut(def, key).map(|p| p as *const V)
fn get_ptr_mut<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*mut V> {
lookup(def.cast(), key).map(|p| p.as_ptr())
}
#[inline]
unsafe fn get<'a, K, V>(def: *mut bpf_map_def, key: &K) -> Option<&'a V> {
get_ptr(def, key).map(|p| unsafe { &*p })
fn get_ptr<K, V>(def: *mut bpf_map_def, key: &K) -> Option<*const V> {
lookup::<_, V>(def.cast(), key).map(|p| p.as_ptr().cast_const())
}

@ -34,9 +34,9 @@ impl<K> Key<K> {
}
impl<K, V> LpmTrie<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
let flags = flags | BPF_F_NO_PREALLOC;
LpmTrie {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LPM_TRIE,
max_entries,
@ -48,9 +48,9 @@ impl<K, V> LpmTrie<K, V> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> LpmTrie<K, V> {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
let flags = flags | BPF_F_NO_PREALLOC;
LpmTrie {
Self {
def: UnsafeCell::new(build_def::<K, V>(
BPF_MAP_TYPE_LPM_TRIE,
max_entries,

@ -15,8 +15,8 @@ pub struct PerCpuArray<T> {
unsafe impl<T> Sync for PerCpuArray<T> {}
impl<T> PerCpuArray<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> PerCpuArray<T> {
PerCpuArray {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERCPU_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -30,8 +30,8 @@ impl<T> PerCpuArray<T> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> PerCpuArray<T> {
PerCpuArray {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERCPU_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -55,7 +55,7 @@ impl<T> PerCpuArray<T> {
#[inline(always)]
pub fn get_ptr(&self, index: u32) -> Option<*const T> {
unsafe { self.lookup(index).map(|p| p.as_ptr() as *const T) }
unsafe { self.lookup(index).map(|p| p.as_ptr().cast_const()) }
}
#[inline(always)]

@ -1,4 +1,4 @@
use core::{cell::UnsafeCell, marker::PhantomData, mem};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr};
use crate::{
EbpfContext,
@ -16,8 +16,8 @@ pub struct PerfEventArray<T> {
unsafe impl<T: Sync> Sync for PerfEventArray<T> {}
impl<T> PerfEventArray<T> {
pub const fn new(flags: u32) -> PerfEventArray<T> {
PerfEventArray {
pub const fn new(flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -31,8 +31,8 @@ impl<T> PerfEventArray<T> {
}
}
pub const fn pinned(flags: u32) -> PerfEventArray<T> {
PerfEventArray {
pub const fn pinned(flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -55,10 +55,10 @@ impl<T> PerfEventArray<T> {
unsafe {
bpf_perf_event_output(
ctx.as_ptr(),
self.def.get() as *mut _,
self.def.get().cast(),
flags,
data as *const _ as *mut _,
mem::size_of::<T>() as u64,
ptr::from_ref(data).cast_mut().cast(),
mem::size_of_val(data) as u64,
);
}
}

@ -15,8 +15,8 @@ pub struct PerfEventByteArray {
unsafe impl Sync for PerfEventByteArray {}
impl PerfEventByteArray {
pub const fn new(flags: u32) -> PerfEventByteArray {
PerfEventByteArray {
pub const fn new(flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -29,8 +29,8 @@ impl PerfEventByteArray {
}
}
pub const fn pinned(flags: u32) -> PerfEventByteArray {
PerfEventByteArray {
pub const fn pinned(flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PERF_EVENT_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -52,9 +52,9 @@ impl PerfEventByteArray {
unsafe {
bpf_perf_event_output(
ctx.as_ptr(),
self.def.get() as *mut _,
self.def.get().cast(),
flags,
data.as_ptr() as *mut _,
data.as_ptr().cast_mut().cast(),
data.len() as u64,
);
}

@ -39,8 +39,8 @@ pub struct ProgramArray {
unsafe impl Sync for ProgramArray {}
impl ProgramArray {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> ProgramArray {
ProgramArray {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PROG_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -53,8 +53,8 @@ impl ProgramArray {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> ProgramArray {
ProgramArray {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_PROG_ARRAY,
key_size: mem::size_of::<u32>() as u32,
@ -80,31 +80,11 @@ impl ProgramArray {
///
/// On success, this function **does not return** into the original program.
/// On failure, a negative error is returned, wrapped in `Err()`.
#[cfg(not(unstable))]
pub unsafe fn tail_call<C: EbpfContext>(&self, ctx: &C, index: u32) -> Result<(), c_long> {
let res = unsafe { bpf_tail_call(ctx.as_ptr(), self.def.get().cast(), index) };
if res != 0 {
Err(res)
} else {
unsafe { unreachable_unchecked() }
}
}
/// Perform a tail call into a program indexed by this map.
///
/// # Safety
///
/// This function is inherently unsafe, since it causes control flow to jump into
/// another eBPF program. This can have side effects, such as drop methods not being
/// called. Note that tail calling into an eBPF program is not the same thing as
/// a function call -- control flow never returns to the caller.
///
/// # Return Value
///
/// On success, this function **does not return** into the original program.
/// On failure, a negative error is returned, wrapped in `Err()`.
#[cfg(unstable)]
pub unsafe fn tail_call<C: EbpfContext>(&self, ctx: &C, index: u32) -> Result<!, c_long> {
pub unsafe fn tail_call<C: EbpfContext>(
&self,
ctx: &C,
index: u32,
) -> Result<core::convert::Infallible, c_long> {
let res = unsafe { bpf_tail_call(ctx.as_ptr(), self.def.get().cast(), index) };
if res != 0 {
Err(res)
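// Editor's note: a sketch of the return-type change in this hunk.
// `core::convert::Infallible` is the stable stand-in for the unstable never
// type `!` that the removed cfg(unstable) variant used: both are
// uninhabited, so `Ok(..)` can never be constructed and a successful tail
// call provably does not return.
fn diverges_on_success() -> Result<core::convert::Infallible, i64> {
    Err(-1) // the Ok arm is unconstructible
}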

@ -1,4 +1,4 @@
use core::{cell::UnsafeCell, marker::PhantomData, mem};
use core::{cell::UnsafeCell, marker::PhantomData, mem, ptr};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_QUEUE},
@ -15,8 +15,8 @@ pub struct Queue<T> {
unsafe impl<T: Sync> Sync for Queue<T> {}
impl<T> Queue<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Queue<T> {
Queue {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_QUEUE,
key_size: 0,
@ -30,8 +30,8 @@ impl<T> Queue<T> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Queue<T> {
Queue {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_QUEUE,
key_size: 0,
@ -46,28 +46,23 @@ impl<T> Queue<T> {
}
pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_map_push_elem(
self.def.get() as *mut _,
value as *const _ as *const _,
flags,
)
};
let ret =
unsafe { bpf_map_push_elem(self.def.get().cast(), ptr::from_ref(value).cast(), flags) };
(ret == 0).then_some(()).ok_or(ret)
}
pub fn pop(&self) -> Option<T> {
unsafe {
let mut value = mem::MaybeUninit::uninit();
let ret = bpf_map_pop_elem(self.def.get() as *mut _, value.as_mut_ptr() as *mut _);
let mut value = mem::MaybeUninit::<T>::uninit();
let ret = bpf_map_pop_elem(self.def.get().cast(), value.as_mut_ptr().cast());
(ret == 0).then_some(value.assume_init())
}
}
pub fn peek(&self) -> Option<T> {
unsafe {
let mut value = mem::MaybeUninit::uninit();
let ret = bpf_map_peek_elem(self.def.get() as *mut _, value.as_mut_ptr() as *mut _);
let mut value = mem::MaybeUninit::<T>::uninit();
let ret = bpf_map_peek_elem(self.def.get().cast(), value.as_mut_ptr().cast());
(ret == 0).then_some(value.assume_init())
}
}

@ -1,6 +1,4 @@
use core::{borrow::Borrow, cell::UnsafeCell, marker::PhantomData, mem};
use aya_ebpf_cty::c_void;
use core::{borrow::Borrow, cell::UnsafeCell, marker::PhantomData, mem, ptr};
use crate::{
EbpfContext as _,
@ -23,8 +21,8 @@ pub struct SockHash<K> {
unsafe impl<K: Sync> Sync for SockHash<K> {}
impl<K> SockHash<K> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockHash<K> {
SockHash {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKHASH,
key_size: mem::size_of::<K>() as u32,
@ -38,8 +36,8 @@ impl<K> SockHash<K> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> SockHash<K> {
SockHash {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKHASH,
key_size: mem::size_of::<K>() as u32,
@ -56,9 +54,9 @@ impl<K> SockHash<K> {
pub fn update(&self, key: &mut K, sk_ops: &mut bpf_sock_ops, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_sock_hash_update(
sk_ops as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut c_void,
ptr::from_mut(sk_ops),
self.def.get().cast(),
ptr::from_mut(key).cast(),
flags,
)
};
@ -68,9 +66,9 @@ impl<K> SockHash<K> {
pub fn redirect_msg(&self, ctx: &SkMsgContext, key: &mut K, flags: u64) -> i64 {
unsafe {
bpf_msg_redirect_hash(
ctx.as_ptr() as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut _,
ctx.msg,
self.def.get().cast(),
ptr::from_mut(key).cast(),
flags,
)
}
@ -79,9 +77,9 @@ impl<K> SockHash<K> {
pub fn redirect_skb(&self, ctx: &SkBuffContext, key: &mut K, flags: u64) -> i64 {
unsafe {
bpf_sk_redirect_hash(
ctx.as_ptr() as *mut _,
self.def.get() as *mut _,
key as *mut _ as *mut _,
ctx.skb.skb,
self.def.get().cast(),
ptr::from_mut(key).cast(),
flags,
)
}

@ -20,8 +20,8 @@ pub struct SockMap {
unsafe impl Sync for SockMap {}
impl SockMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> SockMap {
SockMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKMAP,
key_size: mem::size_of::<u32>() as u32,
@ -34,8 +34,8 @@ impl SockMap {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> SockMap {
SockMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_SOCKMAP,
key_size: mem::size_of::<u32>() as u32,

@ -1,4 +1,4 @@
use core::{marker::PhantomData, mem};
use core::{marker::PhantomData, mem, ptr};
use crate::{
bindings::{bpf_map_def, bpf_map_type::BPF_MAP_TYPE_STACK},
@ -13,8 +13,8 @@ pub struct Stack<T> {
}
impl<T> Stack<T> {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Stack<T> {
Stack {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: bpf_map_def {
type_: BPF_MAP_TYPE_STACK,
key_size: 0,
@ -28,8 +28,8 @@ impl<T> Stack<T> {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> Stack<T> {
Stack {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: bpf_map_def {
type_: BPF_MAP_TYPE_STACK,
key_size: 0,
@ -46,8 +46,8 @@ impl<T> Stack<T> {
pub fn push(&self, value: &T, flags: u64) -> Result<(), i64> {
let ret = unsafe {
bpf_map_push_elem(
&self.def as *const _ as *mut _,
value as *const _ as *const _,
ptr::from_ref(&self.def).cast_mut().cast(),
ptr::from_ref(value).cast(),
flags,
)
};
@ -56,10 +56,10 @@ impl<T> Stack<T> {
pub fn pop(&self) -> Option<T> {
unsafe {
let mut value = mem::MaybeUninit::uninit();
let mut value = mem::MaybeUninit::<T>::uninit();
let ret = bpf_map_pop_elem(
&self.def as *const _ as *mut _,
value.as_mut_ptr() as *mut _,
ptr::from_ref(&self.def).cast_mut().cast(),
value.as_mut_ptr().cast(),
);
(ret == 0).then_some(value.assume_init())
}
@ -67,10 +67,10 @@ impl<T> Stack<T> {
pub fn peek(&self) -> Option<T> {
unsafe {
let mut value = mem::MaybeUninit::uninit();
let mut value = mem::MaybeUninit::<T>::uninit();
let ret = bpf_map_peek_elem(
&self.def as *const _ as *mut _,
value.as_mut_ptr() as *mut _,
ptr::from_ref(&self.def).cast_mut().cast(),
value.as_mut_ptr().cast(),
);
(ret == 0).then_some(value.assume_init())
}

@ -17,8 +17,8 @@ unsafe impl Sync for StackTrace {}
const PERF_MAX_STACK_DEPTH: u32 = 127;
impl StackTrace {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> StackTrace {
StackTrace {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_STACK_TRACE,
key_size: mem::size_of::<u32>() as u32,
@ -31,8 +31,8 @@ impl StackTrace {
}
}
pub const fn pinned(max_entries: u32, flags: u32) -> StackTrace {
StackTrace {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_STACK_TRACE,
key_size: mem::size_of::<u32>() as u32,

@ -52,8 +52,8 @@ impl CpuMap {
/// #[map]
/// static MAP: CpuMap = CpuMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> CpuMap {
CpuMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_CPUMAP,
key_size: mem::size_of::<u32>() as u32,
@ -79,8 +79,8 @@ impl CpuMap {
/// #[map]
/// static MAP: CpuMap = CpuMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> CpuMap {
CpuMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_CPUMAP,
key_size: mem::size_of::<u32>() as u32,

@ -48,8 +48,8 @@ impl DevMap {
/// #[map]
/// static MAP: DevMap = DevMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> DevMap {
DevMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP,
key_size: mem::size_of::<u32>() as u32,
@ -73,8 +73,8 @@ impl DevMap {
/// #[map]
/// static MAP: DevMap = DevMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> DevMap {
DevMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP,
key_size: mem::size_of::<u32>() as u32,

@ -50,8 +50,8 @@ impl DevMapHash {
/// #[map]
/// static MAP: DevMapHash = DevMapHash::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> DevMapHash {
DevMapHash {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP_HASH,
key_size: mem::size_of::<u32>() as u32,
@ -75,8 +75,8 @@ impl DevMapHash {
/// #[map]
/// static MAP: DevMapHash = DevMapHash::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> DevMapHash {
DevMapHash {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_DEVMAP_HASH,
key_size: mem::size_of::<u32>() as u32,

@ -25,7 +25,7 @@ fn try_redirect_map(def: &UnsafeCell<bpf_map_def>, key: u32, flags: u64) -> Resu
// Return XDP_REDIRECT on success, or the value of the two lower bits of the flags argument on
// error. Thus I have no idea why it returns a long (i64) instead of something saner, hence the
// unsigned_abs.
let ret = unsafe { bpf_redirect_map(def.get() as *mut _, key.into(), flags) };
let ret = unsafe { bpf_redirect_map(def.get().cast(), key.into(), flags) };
match ret.unsigned_abs() as u32 {
XDP_REDIRECT => Ok(XDP_REDIRECT),
ret => Err(ret),

@ -69,8 +69,8 @@ impl XskMap {
/// #[map]
/// static SOCKS: XskMap = XskMap::with_max_entries(8, 0);
/// ```
pub const fn with_max_entries(max_entries: u32, flags: u32) -> XskMap {
XskMap {
pub const fn with_max_entries(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_XSKMAP,
key_size: mem::size_of::<u32>() as u32,
@ -94,8 +94,8 @@ impl XskMap {
/// #[map]
/// static SOCKS: XskMap = XskMap::pinned(8, 0);
/// ```
pub const fn pinned(max_entries: u32, flags: u32) -> XskMap {
XskMap {
pub const fn pinned(max_entries: u32, flags: u32) -> Self {
Self {
def: UnsafeCell::new(bpf_map_def {
type_: BPF_MAP_TYPE_XSKMAP,
key_size: mem::size_of::<u32>() as u32,

@ -7,13 +7,13 @@ pub struct DeviceContext {
}
impl DeviceContext {
pub fn new(device: *mut bpf_cgroup_dev_ctx) -> DeviceContext {
DeviceContext { device }
pub fn new(device: *mut bpf_cgroup_dev_ctx) -> Self {
Self { device }
}
}
impl EbpfContext for DeviceContext {
fn as_ptr(&self) -> *mut c_void {
self.device as *mut _
self.device.cast()
}
}

@ -7,8 +7,8 @@ pub struct FEntryContext {
}
impl FEntryContext {
pub fn new(ctx: *mut c_void) -> FEntryContext {
FEntryContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
/// Returns the `n`th argument passed to the probe function, starting from 0.

@ -7,8 +7,8 @@ pub struct FExitContext {
}
impl FExitContext {
pub fn new(ctx: *mut c_void) -> FExitContext {
FExitContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
/// Returns the `n`th argument passed to the probe function, starting from 0.

@ -11,9 +11,9 @@ pub struct FlowDissectorContext {
}
impl FlowDissectorContext {
pub fn new(skb: *mut __sk_buff) -> FlowDissectorContext {
pub fn new(skb: *mut __sk_buff) -> Self {
let skb = SkBuff { skb };
FlowDissectorContext { skb }
Self { skb }
}
#[inline]

@ -7,8 +7,8 @@ pub struct LsmContext {
}
impl LsmContext {
pub fn new(ctx: *mut c_void) -> LsmContext {
LsmContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
/// Returns the `n`th argument passed to the LSM hook, starting from 0.

@ -7,8 +7,8 @@ pub struct PerfEventContext {
}
impl PerfEventContext {
pub fn new(ctx: *mut c_void) -> PerfEventContext {
PerfEventContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
}

@ -20,10 +20,8 @@ pub struct ProbeContext {
}
impl ProbeContext {
pub fn new(ctx: *mut c_void) -> ProbeContext {
ProbeContext {
regs: ctx as *mut pt_regs,
}
pub fn new(ctx: *mut c_void) -> Self {
Self { regs: ctx.cast() }
}
/// Returns the `n`th argument passed to the probe function, starting from 0.
@ -54,6 +52,6 @@ impl ProbeContext {
impl EbpfContext for ProbeContext {
fn as_ptr(&self) -> *mut c_void {
self.regs as *mut c_void
self.regs.cast()
}
}

@ -7,8 +7,8 @@ pub struct RawTracePointContext {
}
impl RawTracePointContext {
pub fn new(ctx: *mut c_void) -> RawTracePointContext {
RawTracePointContext { ctx: ctx.cast() }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx: ctx.cast() }
}
#[expect(clippy::missing_safety_doc)]

@ -20,10 +20,8 @@ pub struct RetProbeContext {
}
impl RetProbeContext {
pub fn new(ctx: *mut c_void) -> RetProbeContext {
RetProbeContext {
regs: ctx as *mut pt_regs,
}
pub fn new(ctx: *mut c_void) -> Self {
Self { regs: ctx.cast() }
}
/// Returns the return value of the probed function.
@ -48,6 +46,6 @@ impl RetProbeContext {
impl EbpfContext for RetProbeContext {
fn as_ptr(&self) -> *mut c_void {
self.regs as *mut c_void
self.regs.cast()
}
}

@ -1,6 +1,7 @@
use core::{
ffi::c_void,
mem::{self, MaybeUninit},
ptr,
};
use aya_ebpf_bindings::helpers::{
@ -17,8 +18,8 @@ pub struct SkBuff {
}
impl SkBuff {
pub fn new(skb: *mut __sk_buff) -> SkBuff {
SkBuff { skb }
pub fn new(skb: *mut __sk_buff) -> Self {
Self { skb }
}
#[expect(clippy::len_without_is_empty)]
@ -65,8 +66,8 @@ impl SkBuff {
let ret = bpf_skb_load_bytes(
self.skb as *const _,
offset as u32,
&mut data as *mut _ as *mut _,
mem::size_of::<T>() as u32,
ptr::from_mut(&mut data).cast(),
mem::size_of_val(&data) as u32,
);
if ret == 0 {
Ok(data.assume_init())
@ -93,9 +94,9 @@ impl SkBuff {
let len_u32 = u32::try_from(len).map_err(|core::num::TryFromIntError { .. }| -1)?;
let ret = unsafe {
bpf_skb_load_bytes(
self.skb as *const _,
self.skb.cast(),
offset as u32,
dst.as_mut_ptr() as *mut _,
dst.as_mut_ptr().cast(),
len_u32,
)
};
@ -106,10 +107,10 @@ impl SkBuff {
pub fn store<T>(&mut self, offset: usize, v: &T, flags: u64) -> Result<(), c_long> {
unsafe {
let ret = bpf_skb_store_bytes(
self.skb as *mut _,
self.skb.cast(),
offset as u32,
v as *const _ as *const _,
mem::size_of::<T>() as u32,
ptr::from_ref(v).cast(),
mem::size_of_val(v) as u32,
flags,
);
if ret == 0 { Ok(()) } else { Err(ret) }
@ -125,7 +126,7 @@ impl SkBuff {
size: u64,
) -> Result<(), c_long> {
unsafe {
let ret = bpf_l3_csum_replace(self.skb as *mut _, offset as u32, from, to, size);
let ret = bpf_l3_csum_replace(self.skb.cast(), offset as u32, from, to, size);
if ret == 0 { Ok(()) } else { Err(ret) }
}
}
@ -139,32 +140,32 @@ impl SkBuff {
flags: u64,
) -> Result<(), c_long> {
unsafe {
let ret = bpf_l4_csum_replace(self.skb as *mut _, offset as u32, from, to, flags);
let ret = bpf_l4_csum_replace(self.skb.cast(), offset as u32, from, to, flags);
if ret == 0 { Ok(()) } else { Err(ret) }
}
}
#[inline]
pub fn adjust_room(&self, len_diff: i32, mode: u32, flags: u64) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_adjust_room(self.as_ptr() as *mut _, len_diff, mode, flags) };
let ret = unsafe { bpf_skb_adjust_room(self.skb, len_diff, mode, flags) };
if ret == 0 { Ok(()) } else { Err(ret) }
}
#[inline]
pub fn clone_redirect(&self, if_index: u32, flags: u64) -> Result<(), c_long> {
let ret = unsafe { bpf_clone_redirect(self.as_ptr() as *mut _, if_index, flags) };
let ret = unsafe { bpf_clone_redirect(self.skb, if_index, flags) };
if ret == 0 { Ok(()) } else { Err(ret) }
}
#[inline]
pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_proto(self.as_ptr() as *mut _, proto, flags) };
let ret = unsafe { bpf_skb_change_proto(self.skb, proto, flags) };
if ret == 0 { Ok(()) } else { Err(ret) }
}
#[inline]
pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) };
let ret = unsafe { bpf_skb_change_type(self.skb, ty) };
if ret == 0 { Ok(()) } else { Err(ret) }
}
@ -175,12 +176,12 @@ impl SkBuff {
/// for reading and writing with direct packet access.
#[inline(always)]
pub fn pull_data(&self, len: u32) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_pull_data(self.as_ptr() as *mut _, len) };
let ret = unsafe { bpf_skb_pull_data(self.skb, len) };
if ret == 0 { Ok(()) } else { Err(ret) }
}
pub(crate) fn as_ptr(&self) -> *mut c_void {
self.skb as *mut _
self.skb.cast()
}
#[inline]
@ -229,9 +230,9 @@ pub struct SkBuffContext {
}
impl SkBuffContext {
pub fn new(skb: *mut __sk_buff) -> SkBuffContext {
pub fn new(skb: *mut __sk_buff) -> Self {
let skb = SkBuff { skb };
SkBuffContext { skb }
Self { skb }
}
#[expect(clippy::len_without_is_empty)]

@ -7,13 +7,13 @@ pub struct SkLookupContext {
}
impl SkLookupContext {
pub fn new(lookup: *mut bpf_sk_lookup) -> SkLookupContext {
SkLookupContext { lookup }
pub fn new(lookup: *mut bpf_sk_lookup) -> Self {
Self { lookup }
}
}
impl EbpfContext for SkLookupContext {
fn as_ptr(&self) -> *mut c_void {
self.lookup as *mut _
self.lookup.cast()
}
}

@ -11,8 +11,8 @@ pub struct SkMsgContext {
}
impl SkMsgContext {
pub fn new(msg: *mut sk_msg_md) -> SkMsgContext {
SkMsgContext { msg }
pub fn new(msg: *mut sk_msg_md) -> Self {
Self { msg }
}
pub fn size(&self) -> u32 {
@ -40,6 +40,6 @@ impl SkMsgContext {
impl EbpfContext for SkMsgContext {
fn as_ptr(&self) -> *mut c_void {
self.msg as *mut _
self.msg.cast()
}
}

@ -7,13 +7,13 @@ pub struct SockContext {
}
impl SockContext {
pub fn new(sock: *mut bpf_sock) -> SockContext {
SockContext { sock }
pub fn new(sock: *mut bpf_sock) -> Self {
Self { sock }
}
}
impl EbpfContext for SockContext {
fn as_ptr(&self) -> *mut c_void {
self.sock as *mut _
self.sock.cast()
}
}

@ -7,13 +7,13 @@ pub struct SockAddrContext {
}
impl SockAddrContext {
pub fn new(sock_addr: *mut bpf_sock_addr) -> SockAddrContext {
SockAddrContext { sock_addr }
pub fn new(sock_addr: *mut bpf_sock_addr) -> Self {
Self { sock_addr }
}
}
impl EbpfContext for SockAddrContext {
fn as_ptr(&self) -> *mut c_void {
self.sock_addr as *mut _
self.sock_addr.cast()
}
}

@ -9,8 +9,8 @@ pub struct SockOpsContext {
}
impl SockOpsContext {
pub fn new(ops: *mut bpf_sock_ops) -> SockOpsContext {
SockOpsContext { ops }
pub fn new(ops: *mut bpf_sock_ops) -> Self {
Self { ops }
}
pub fn op(&self) -> u32 {
@ -65,6 +65,6 @@ impl SockOpsContext {
impl EbpfContext for SockOpsContext {
fn as_ptr(&self) -> *mut c_void {
self.ops as *mut _
self.ops.cast()
}
}

@ -7,13 +7,13 @@ pub struct SockoptContext {
}
impl SockoptContext {
pub fn new(sockopt: *mut bpf_sockopt) -> SockoptContext {
SockoptContext { sockopt }
pub fn new(sockopt: *mut bpf_sockopt) -> Self {
Self { sockopt }
}
}
impl EbpfContext for SockoptContext {
fn as_ptr(&self) -> *mut c_void {
self.sockopt as *mut _
self.sockopt.cast()
}
}

@ -7,13 +7,13 @@ pub struct SysctlContext {
}
impl SysctlContext {
pub fn new(sysctl: *mut bpf_sysctl) -> SysctlContext {
SysctlContext { sysctl }
pub fn new(sysctl: *mut bpf_sysctl) -> Self {
Self { sysctl }
}
}
impl EbpfContext for SysctlContext {
fn as_ptr(&self) -> *mut c_void {
self.sysctl as *mut _
self.sysctl.cast()
}
}

@ -7,9 +7,9 @@ pub struct TcContext {
}
impl TcContext {
pub fn new(skb: *mut __sk_buff) -> TcContext {
pub fn new(skb: *mut __sk_buff) -> Self {
let skb = SkBuff { skb };
TcContext { skb }
Self { skb }
}
#[expect(clippy::len_without_is_empty)]

@ -7,8 +7,8 @@ pub struct BtfTracePointContext {
}
impl BtfTracePointContext {
pub fn new(ctx: *mut c_void) -> BtfTracePointContext {
BtfTracePointContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
/// Returns the `n`th argument of the BTF tracepoint, starting from 0.
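
For reference, a BTF tracepoint program reads its arguments by index, with the layout fixed by the kernel's BTF for the attached function. A hedged sketch assuming the usual aya-ebpf macros (the tracepoint name and argument type below are illustrative only):

use aya_ebpf::{macros::btf_tracepoint, programs::BtfTracePointContext};

#[btf_tracepoint(function = "sched_process_fork")]
fn sched_process_fork(ctx: BtfTracePointContext) -> i32 {
    // Index 0 is assumed here to be a pointer argument; the real type
    // must match the tracepoint's BTF signature.
    let _arg: *const core::ffi::c_void = unsafe { ctx.arg(0) };
    0
}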

@ -7,8 +7,8 @@ pub struct TracePointContext {
}
impl TracePointContext {
pub fn new(ctx: *mut c_void) -> TracePointContext {
TracePointContext { ctx }
pub fn new(ctx: *mut c_void) -> Self {
Self { ctx }
}
#[expect(clippy::missing_safety_doc)]

@ -7,8 +7,8 @@ pub struct XdpContext {
}
impl XdpContext {
pub fn new(ctx: *mut xdp_md) -> XdpContext {
XdpContext { ctx }
pub fn new(ctx: *mut xdp_md) -> Self {
Self { ctx }
}
#[inline]
@ -48,6 +48,6 @@ impl XdpContext {
impl EbpfContext for XdpContext {
fn as_ptr(&self) -> *mut c_void {
self.ctx as *mut _
self.ctx.cast()
}
}

@ -66,8 +66,8 @@ pub mod ring_buf {
}
}
impl<'a> core::iter::Sum<&'a Registers> for Registers {
fn sum<I: Iterator<Item = &'a Registers>>(iter: I) -> Self {
impl<'a> core::iter::Sum<&'a Self> for Registers {
fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
iter.fold(Default::default(), |a, b| a + *b)
}
}
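
With the item type written as `&'a Self`, the impl reads the same after any rename; behavior is unchanged, folding with `Add` from the `Default` identity. Usage sketch (assumes `Registers: Add<Output = Registers> + Copy + Default`, as the fold implies):

fn total(per_cpu: &[Registers]) -> Registers {
    // Sums borrowed samples without cloning the slice.
    per_cpu.iter().sum()
}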

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
#[cfg(not(test))]
extern crate ebpf_panic;
@ -44,7 +45,7 @@ macro_rules! define_array_test {
}
#[uprobe]
pub fn $set_prog(ctx: ProbeContext) -> Result<(), c_long> {
fn $set_prog(ctx: ProbeContext) -> Result<(), c_long> {
let index = ctx.arg(0).ok_or(-1)?;
let value = ctx.arg(1).ok_or(-1)?;
$array_map.set(index, &value, 0)?;
@ -52,7 +53,7 @@ macro_rules! define_array_test {
}
#[uprobe]
pub fn $get_prog(ctx: ProbeContext) -> Result<(), c_long> {
fn $get_prog(ctx: ProbeContext) -> Result<(), c_long> {
let index = ctx.arg(0).ok_or(-1)?;
let value = $array_map.get(index).ok_or(-1)?;
$result_set_fn(GET_INDEX, *value)?;
@ -60,7 +61,7 @@ macro_rules! define_array_test {
}
#[uprobe]
pub fn $get_ptr_prog(ctx: ProbeContext) -> Result<(), c_long> {
fn $get_ptr_prog(ctx: ProbeContext) -> Result<(), c_long> {
let index = ctx.arg(0).ok_or(-1)?;
let value = $array_map.get_ptr(index).ok_or(-1)?;
$result_set_fn(GET_PTR_INDEX, unsafe { *value })?;
@ -68,7 +69,7 @@ macro_rules! define_array_test {
}
#[uprobe]
pub fn $get_ptr_mut_prog(ctx: ProbeContext) -> Result<(), c_long> {
fn $get_ptr_mut_prog(ctx: ProbeContext) -> Result<(), c_long> {
let index = ctx.arg(0).ok_or(-1)?;
let ptr = $array_map.get_ptr_mut(index).ok_or(-1)?;
let value = unsafe { *ptr };
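
Throughout the test bins, `pub fn` becomes `fn` on program items, presumably because `unreachable_pub` flags `pub` items that nothing outside the crate can reach, and visibility is not what exports the program anyway: the attribute macro emits the actual linkable symbol. A minimal sketch of the shape these probes take (hypothetical name):

use aya_ebpf::{macros::uprobe, programs::ProbeContext};

#[uprobe]
fn my_probe(_ctx: ProbeContext) {
    // The #[uprobe] expansion provides the exported entry point, so the
    // Rust item itself can stay private.
}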

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
helpers::{bpf_probe_read_kernel_str_bytes, bpf_probe_read_user_str_bytes},
@ -54,7 +55,7 @@ static RESULT: Array<TestResult> = Array::with_max_entries(1, 0);
static KERNEL_BUFFER: Array<[u8; RESULT_BUF_LEN]> = Array::with_max_entries(1, 0);
#[uprobe]
pub fn test_bpf_probe_read_user_str_bytes(ctx: ProbeContext) {
fn test_bpf_probe_read_user_str_bytes(ctx: ProbeContext) {
read_str_bytes(
bpf_probe_read_user_str_bytes,
ctx.arg::<*const u8>(0),
@ -63,7 +64,7 @@ pub fn test_bpf_probe_read_user_str_bytes(ctx: ProbeContext) {
}
#[uprobe]
pub fn test_bpf_probe_read_kernel_str_bytes(ctx: ProbeContext) {
fn test_bpf_probe_read_kernel_str_bytes(ctx: ProbeContext) {
read_str_bytes(
bpf_probe_read_kernel_str_bytes,
KERNEL_BUFFER

@ -1,3 +1,4 @@
#![no_std]
#![expect(unused_crate_dependencies, reason = "used in bins")]
// This file exists to enable the library target.
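
`unused_crate_dependencies` fires per build target, so a library target that exists only to anchor the crate would warn about every bin-only dependency; `expect` records that as intentional. Unlike `allow`, `expect` itself warns if the lint ever stops firing, so the suppression cannot go stale. Tiny standalone illustration (hypothetical item, not from this diff):

// Compiles cleanly while `dead_code` fires; if `unused_helper` gains a
// caller, the compiler reports the unfulfilled expectation instead.
#[expect(dead_code, reason = "illustration only")]
fn unused_helper() {}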

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
#[cfg(not(test))]
extern crate ebpf_panic;
@ -34,21 +35,21 @@ macro_rules! define_linear_ds_test {
static $map_ident: $Type<u64> = $Type::with_max_entries(10, 0);
#[uprobe]
pub fn $push_fn(ctx: ProbeContext) -> Result<(), c_long> {
fn $push_fn(ctx: ProbeContext) -> Result<(), c_long> {
let value = ctx.arg(0).ok_or(-1)?;
$map_ident.push(&value, 0)?;
Ok(())
}
#[uprobe]
pub fn $pop_fn(_: ProbeContext) -> Result<(), c_long> {
fn $pop_fn(_: ProbeContext) -> Result<(), c_long> {
let value = $map_ident.pop().ok_or(-1)?;
result_set(POP_INDEX, value)?;
Ok(())
}
#[uprobe]
pub fn $peek_fn(_: ProbeContext) -> Result<(), c_long> {
fn $peek_fn(_: ProbeContext) -> Result<(), c_long> {
let value = $map_ident.peek().ok_or(-1)?;
result_set(PEEK_INDEX, value)?;
Ok(())
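
Expanded for one map type, the macro above yields probes of roughly this shape. A hedged sketch using `Queue` (map and program names are hypothetical; the real macro also wires up a peek probe and the shared result map):

use aya_ebpf::{cty::c_long, macros::{map, uprobe}, maps::Queue, programs::ProbeContext};

#[map]
static EVENTS: Queue<u64> = Queue::with_max_entries(10, 0);

#[uprobe]
fn push_event(ctx: ProbeContext) -> Result<(), c_long> {
    // First uprobe argument is the value to enqueue.
    let value: u64 = ctx.arg(0).ok_or(-1)?;
    EVENTS.push(&value, 0)?;
    Ok(())
}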

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use core::{
hint::black_box,
@ -24,7 +25,7 @@ const FOUR_KB_ARRAY: [u8; 4096] = [0u8; 4096];
const EIGHT_KB_ARRAY: [u8; 8192] = [0u8; 8192];
#[uprobe]
pub fn test_log(ctx: ProbeContext) {
fn test_log(ctx: ProbeContext) {
debug!(&ctx, "Hello from eBPF!");
error!(
&ctx,
@ -119,7 +120,7 @@ pub fn test_log(ctx: ProbeContext) {
}
#[uprobe]
pub fn test_log_omission(ctx: ProbeContext) {
fn test_log_omission(ctx: ProbeContext) {
debug!(
&ctx,
"This is the last u32: {}",

@ -1,8 +1,6 @@
// Socket Filter program for testing with an arbitrary program with maps.
// This is mainly used in tests with consideration for old kernels.
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
macros::{map, socket_filter},
@ -27,7 +25,7 @@ static MAP_WITH_LOOOONG_NAAAAAAAAME: HashMap<u32, u8> = HashMap::<u32, u8>::with
// Introduced in kernel v3.19.
#[socket_filter]
pub fn simple_prog(_ctx: SkBuffContext) -> i64 {
fn simple_prog(_ctx: SkBuffContext) -> i64 {
// So that these maps show up under the `map_ids` field.
FOO.get(0);
// If we use the literal value `0` instead of the local variable `i`, then an additional

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use core::mem;
@ -37,7 +38,7 @@ struct Value {
static RULES: HashMap<u8, Value> = HashMap::<u8, Value>::with_max_entries(1, BPF_F_NO_PREALLOC);
#[xdp]
pub fn do_dnat(ctx: XdpContext) -> u32 {
fn do_dnat(ctx: XdpContext) -> u32 {
try_do_dnat(ctx).unwrap_or(xdp_action::XDP_DROP)
}

@ -1,12 +1,13 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{bindings::xdp_action, macros::xdp, programs::XdpContext};
#[cfg(not(test))]
extern crate ebpf_panic;
#[xdp]
pub fn ihaveaverylongname(ctx: XdpContext) -> u32 {
fn ihaveaverylongname(ctx: XdpContext) -> u32 {
match unsafe { try_pass(ctx) } {
Ok(ret) => ret,
Err(_) => xdp_action::XDP_ABORTED,

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{bindings::xdp_action, macros::xdp, programs::XdpContext};
#[cfg(not(test))]
@ -8,7 +9,7 @@ extern crate ebpf_panic;
// Note: the `frags` attribute causes this probe to be incompatible with kernel versions < 5.18.0.
// See https://github.com/torvalds/linux/commit/c2f2cdb.
#[xdp(frags)]
pub fn pass(ctx: XdpContext) -> u32 {
fn pass(ctx: XdpContext) -> u32 {
match unsafe { try_pass(ctx) } {
Ok(ret) => ret,
Err(_) => xdp_action::XDP_ABORTED,

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
macros::{map, raw_tracepoint},
@ -14,7 +15,7 @@ use integration_common::raw_tracepoint::SysEnterEvent;
static RESULT: Array<SysEnterEvent> = Array::with_max_entries(1, 0);
#[raw_tracepoint(tracepoint = "sys_enter")]
pub fn sys_enter(ctx: RawTracePointContext) -> i32 {
fn sys_enter(ctx: RawTracePointContext) -> i32 {
let common_type: u16 = unsafe { ctx.arg(0) };
let common_flags: u8 = unsafe { ctx.arg(1) };

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
bindings::xdp_action,
@ -27,7 +28,7 @@ static CPUS: CpuMap = CpuMap::with_max_entries(1, 0);
static HITS: Array<u32> = Array::with_max_entries(2, 0);
#[xdp]
pub fn redirect_sock(ctx: XdpContext) -> u32 {
fn redirect_sock(ctx: XdpContext) -> u32 {
let queue_id = ctx.rx_queue_index();
if SOCKS.get(queue_id) == Some(queue_id) {
// Queue ID matches, redirect to AF_XDP socket.
@ -41,31 +42,31 @@ pub fn redirect_sock(ctx: XdpContext) -> u32 {
}
#[xdp]
pub fn redirect_dev(_ctx: XdpContext) -> u32 {
fn redirect_dev(_ctx: XdpContext) -> u32 {
inc_hit(0);
DEVS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp]
pub fn redirect_dev_hash(_ctx: XdpContext) -> u32 {
fn redirect_dev_hash(_ctx: XdpContext) -> u32 {
inc_hit(0);
DEVS_HASH.redirect(10, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp]
pub fn redirect_cpu(_ctx: XdpContext) -> u32 {
fn redirect_cpu(_ctx: XdpContext) -> u32 {
inc_hit(0);
CPUS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}
#[xdp(map = "cpumap")]
pub fn redirect_cpu_chain(_ctx: XdpContext) -> u32 {
fn redirect_cpu_chain(_ctx: XdpContext) -> u32 {
inc_hit(1);
xdp_action::XDP_PASS
}
#[xdp(map = "devmap")]
pub fn redirect_dev_chain(_ctx: XdpContext) -> u32 {
fn redirect_dev_chain(_ctx: XdpContext) -> u32 {
inc_hit(1);
xdp_action::XDP_PASS
}
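
For context, the redirect programs above lean on the map types' `redirect` helpers, which return the XDP action on success. A minimal sketch of the devmap case (map name and fallback action are illustrative):

use aya_ebpf::{bindings::xdp_action, macros::{map, xdp}, maps::DevMap, programs::XdpContext};

#[map]
static DEVS: DevMap = DevMap::with_max_entries(1, 0);

#[xdp]
fn redirect_all(_ctx: XdpContext) -> u32 {
    // Redirect out through the interface stored at index 0, or abort if
    // the entry is unset.
    DEVS.redirect(0, 0).unwrap_or(xdp_action::XDP_ABORTED)
}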

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use core::hint;
@ -15,7 +16,7 @@ extern crate ebpf_panic;
static RESULTS: Array<u64> = Array::with_max_entries(3, 0);
#[uprobe]
pub fn test_64_32_call_relocs(_ctx: ProbeContext) {
fn test_64_32_call_relocs(_ctx: ProbeContext) {
// This will link set_result and do a forward call.
set_result(0, hint::black_box(1));

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
macros::{map, uprobe},
@ -21,7 +22,7 @@ static RING_BUF: RingBuf = RingBuf::with_byte_size(0, 0);
static REGISTERS: PerCpuArray<Registers> = PerCpuArray::with_max_entries(1, 0);
#[uprobe]
pub fn ring_buf_test(ctx: ProbeContext) {
fn ring_buf_test(ctx: ProbeContext) {
let Registers { dropped, rejected } = match REGISTERS.get_ptr_mut(0) {
Some(regs) => unsafe { &mut *regs },
None => return,

@ -1,8 +1,6 @@
// Socket Filter program for testing with an arbitrary program.
// This is mainly used in tests with consideration for old kernels.
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{macros::socket_filter, programs::SkBuffContext};
#[cfg(not(test))]
@ -10,6 +8,6 @@ extern crate ebpf_panic;
// Introduced in kernel v3.19.
#[socket_filter]
pub fn simple_prog(_ctx: SkBuffContext) -> i64 {
fn simple_prog(_ctx: SkBuffContext) -> i64 {
0
}

@ -1,5 +1,6 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{
cty::c_long,
@ -16,7 +17,7 @@ extern crate ebpf_panic;
static RESULT: Array<TestResult> = Array::with_max_entries(1, 0);
#[uprobe]
pub fn test_bpf_strncmp(ctx: ProbeContext) -> Result<(), c_long> {
fn test_bpf_strncmp(ctx: ProbeContext) -> Result<(), c_long> {
let s1: *const u8 = ctx.arg(0).ok_or(-1)?;
let mut b1 = [0u8; 3];
let _: &[u8] = unsafe { bpf_probe_read_user_str_bytes(s1, &mut b1) }?;

@ -1,11 +1,12 @@
#![no_std]
#![no_main]
#![expect(unused_crate_dependencies, reason = "used in other bins")]
use aya_ebpf::{bindings::tcx_action_base::TCX_NEXT, macros::classifier, programs::TcContext};
#[cfg(not(test))]
extern crate ebpf_panic;
#[classifier]
pub fn tcx_next(_ctx: TcContext) -> i32 {
fn tcx_next(_ctx: TcContext) -> i32 {
TCX_NEXT
}
