diff --git a/Cargo.toml b/Cargo.toml index 9ccb29de..2f1efab4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,9 +14,6 @@ panic = "abort" [profile.release] panic = "abort" -[profile.dev.package.integration-ebpf] -opt-level = 2 -overflow-checks = false - [profile.release.package.integration-ebpf] debug = 2 +codegen-units = 1 diff --git a/aya-log-common/Cargo.toml b/aya-log-common/Cargo.toml index 19c97fce..7413d8b7 100644 --- a/aya-log-common/Cargo.toml +++ b/aya-log-common/Cargo.toml @@ -15,7 +15,7 @@ userspace = [ "aya" ] [dependencies] aya = { path = "../aya", version = "0.11.0", optional=true } -num_enum = { version = "0.5", default-features = false } +num_enum = { version = "0.6", default-features = false } [lib] path = "src/lib.rs" diff --git a/aya-obj/src/btf/btf.rs b/aya-obj/src/btf/btf.rs index f439ee1a..40494ef0 100644 --- a/aya-obj/src/btf/btf.rs +++ b/aya-obj/src/btf/btf.rs @@ -432,6 +432,19 @@ impl Btf { // Sanitize DATASEC if they are not supported BtfType::DataSec(d) if !features.btf_datasec => { debug!("{}: not supported. replacing with STRUCT", kind); + + // STRUCT aren't allowed to have "." in their name, fixup this if needed. + let mut name_offset = t.name_offset(); + let sec_name = self.string_at(name_offset)?; + let name = sec_name.to_string(); + + // Handle any "." characters in struct names + // Example: ".maps" + let fixed_name = name.replace('.', "_"); + if fixed_name != name { + name_offset = self.add_string(fixed_name); + } + let mut members = vec![]; for member in d.entries.iter() { let mt = types.type_by_id(member.btf_type).unwrap(); @@ -441,7 +454,9 @@ impl Btf { offset: member.offset * 8, }) } - types.types[i] = BtfType::Struct(Struct::new(t.name_offset(), members, 0)); + + types.types[i] = + BtfType::Struct(Struct::new(name_offset, members, d.entries.len() as u32)); } // Fixup DATASEC // DATASEC sizes aren't always set by LLVM @@ -536,22 +551,39 @@ impl Btf { types.types[i] = enum_type; } // Sanitize FUNC - BtfType::Func(ty) if !features.btf_func => { - debug!("{}: not supported. replacing with TYPEDEF", kind); - let typedef_type = BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type)); - types.types[i] = typedef_type; - } - // Sanitize BTF_FUNC_GLOBAL - BtfType::Func(ty) if !features.btf_func_global => { - let mut fixed_ty = ty.clone(); - if ty.linkage() == FuncLinkage::Global { - debug!( - "{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC", - kind - ); - fixed_ty.set_linkage(FuncLinkage::Static); + BtfType::Func(ty) => { + let name = self.string_at(ty.name_offset)?; + // Sanitize FUNC + if !features.btf_func { + debug!("{}: not supported. replacing with TYPEDEF", kind); + let typedef_type = + BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type)); + types.types[i] = typedef_type; + } else if !features.btf_func_global + || name == "memset" + || name == "memcpy" + || name == "memmove" + || name == "memcmp" + { + // Sanitize BTF_FUNC_GLOBAL when not supported and ensure that + // memory builtins are marked as static. Globals are type checked + // and verified separately from their callers, while instead we + // want tracking info (eg bound checks) to be propagated to the + // memory builtins. + let mut fixed_ty = ty.clone(); + if ty.linkage() == FuncLinkage::Global { + if !features.btf_func_global { + debug!( + "{}: BTF_FUNC_GLOBAL not supported. 
replacing with BTF_FUNC_STATIC", + kind + ); + } else { + debug!("changing FUNC {name} linkage to BTF_FUNC_STATIC"); + } + fixed_ty.set_linkage(FuncLinkage::Static); + } + types.types[i] = BtfType::Func(fixed_ty); } - types.types[i] = BtfType::Func(fixed_ty); } // Sanitize FLOAT BtfType::Float(ty) if !features.btf_float => { @@ -1045,9 +1077,7 @@ mod tests { let name_offset = btf.add_string("&mut int".to_string()); let ptr_type_id = btf.add_type(BtfType::Ptr(Ptr::new(name_offset, int_type_id))); - let features = BtfFeatures { - ..Default::default() - }; + let features = Default::default(); btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features) .unwrap(); @@ -1118,7 +1148,7 @@ mod tests { VarLinkage::Static, ))); - let name_offset = btf.add_string(".data".to_string()); + let name_offset = btf.add_string("data".to_string()); let variables = vec![DataSecEntry { btf_type: var_type_id, offset: 0, @@ -1352,6 +1382,60 @@ mod tests { Btf::parse(&raw, Endianness::default()).unwrap(); } + #[test] + fn test_sanitize_mem_builtins() { + let mut btf = Btf::new(); + let name_offset = btf.add_string("int".to_string()); + let int_type_id = btf.add_type(BtfType::Int(Int::new( + name_offset, + 4, + IntEncoding::Signed, + 0, + ))); + + let params = vec![ + BtfParam { + name_offset: btf.add_string("a".to_string()), + btf_type: int_type_id, + }, + BtfParam { + name_offset: btf.add_string("b".to_string()), + btf_type: int_type_id, + }, + ]; + let func_proto_type_id = + btf.add_type(BtfType::FuncProto(FuncProto::new(params, int_type_id))); + + let builtins = ["memset", "memcpy", "memcmp", "memmove"]; + for fname in builtins { + let func_name_offset = btf.add_string(fname.to_string()); + let func_type_id = btf.add_type(BtfType::Func(Func::new( + func_name_offset, + func_proto_type_id, + FuncLinkage::Global, + ))); + + let features = BtfFeatures { + btf_func: true, + btf_func_global: true, // to force function name check + ..Default::default() + }; + + btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features) + .unwrap(); + + if let BtfType::Func(fixed) = btf.type_by_id(func_type_id).unwrap() { + assert!(fixed.linkage() == FuncLinkage::Static); + } else { + panic!("not a func") + } + + // Ensure we can convert to bytes and back again + let raw = btf.to_bytes(); + Btf::parse(&raw, Endianness::default()).unwrap(); + } + } + #[test] fn test_sanitize_float() { let mut btf = Btf::new(); diff --git a/aya-obj/src/lib.rs b/aya-obj/src/lib.rs index 438e38f2..3775295e 100644 --- a/aya-obj/src/lib.rs +++ b/aya-obj/src/lib.rs @@ -37,8 +37,9 @@ //! let bytes = std::fs::read("program.o").unwrap(); //! let mut object = Object::parse(&bytes).unwrap(); //! // Relocate the programs -//! object.relocate_calls().unwrap(); -//! object.relocate_maps(std::iter::empty()).unwrap(); +//! let text_sections = std::collections::HashSet::new(); +//! object.relocate_calls(&text_sections).unwrap(); +//! object.relocate_maps(std::iter::empty(), &text_sections).unwrap(); //! //! // Run with rbpf //! let instructions = &object.programs["prog_name"].function.instructions; diff --git a/aya-obj/src/maps.rs b/aya-obj/src/maps.rs index a6c3c96c..22a88da9 100644 --- a/aya-obj/src/maps.rs +++ b/aya-obj/src/maps.rs @@ -2,7 +2,10 @@ use core::mem; -use crate::thiserror::{self, Error}; +use crate::{ + thiserror::{self, Error}, + BpfSectionKind, +}; use alloc::vec::Vec; /// Invalid map type encontered @@ -139,33 +142,6 @@ pub struct bpf_map_def { /// The first five __u32 of `bpf_map_def` must be defined. 
pub(crate) const MINIMUM_MAP_SIZE: usize = mem::size_of::() * 5; -/// Kinds of maps -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum MapKind { - /// A map holding `.bss` section data - Bss, - /// A map holding `.data` section data - Data, - /// A map holding `.rodata` section data - Rodata, - /// Other maps - Other, -} - -impl From<&str> for MapKind { - fn from(s: &str) -> Self { - if s == ".bss" { - MapKind::Bss - } else if s.starts_with(".data") { - MapKind::Data - } else if s.starts_with(".rodata") { - MapKind::Rodata - } else { - MapKind::Other - } - } -} - /// Map data defined in `maps` or `.maps` sections #[derive(Debug, Clone)] pub enum Map { @@ -248,14 +224,6 @@ impl Map { } } - /// Returns the map kind - pub fn kind(&self) -> MapKind { - match self { - Map::Legacy(m) => m.kind, - Map::Btf(m) => m.kind, - } - } - /// Returns the section index pub fn section_index(&self) -> usize { match self { @@ -264,11 +232,22 @@ impl Map { } } - /// Returns the symbol index - pub fn symbol_index(&self) -> usize { + /// Returns the section kind. + pub fn section_kind(&self) -> BpfSectionKind { + match self { + Map::Legacy(m) => m.section_kind, + Map::Btf(_) => BpfSectionKind::BtfMaps, + } + } + + /// Returns the symbol index. + /// + /// This is `None` for data maps (.bss, .data and .rodata) since those don't + /// need symbols in order to be relocated. + pub fn symbol_index(&self) -> Option { match self { Map::Legacy(m) => m.symbol_index, - Map::Btf(m) => m.symbol_index, + Map::Btf(m) => Some(m.symbol_index), } } } @@ -283,12 +262,16 @@ pub struct LegacyMap { pub def: bpf_map_def, /// The section index pub section_index: usize, - /// The symbol index - pub symbol_index: usize, + /// The section kind + pub section_kind: BpfSectionKind, + /// The symbol index. + /// + /// This is None for data maps (.bss .data and .rodata). We don't need + /// symbols to relocate those since they don't contain multiple maps, but + /// are just a flat array of bytes. + pub symbol_index: Option, /// The map data pub data: Vec, - /// The map kind - pub kind: MapKind, } /// A BTF-defined map, most likely from a `.maps` section. @@ -298,6 +281,5 @@ pub struct BtfMap { pub def: BtfMapDef, pub(crate) section_index: usize, pub(crate) symbol_index: usize, - pub(crate) kind: MapKind, pub(crate) data: Vec, } diff --git a/aya-obj/src/obj.rs b/aya-obj/src/obj.rs index 54d884cb..c7f72cf0 100644 --- a/aya-obj/src/obj.rs +++ b/aya-obj/src/obj.rs @@ -15,7 +15,9 @@ use object::{ }; use crate::{ - maps::{BtfMap, LegacyMap, Map, MapKind, MINIMUM_MAP_SIZE}, + btf::BtfFeatures, + generated::{BPF_CALL, BPF_JMP, BPF_K}, + maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE}, relocation::*, thiserror::{self, Error}, util::HashMap, @@ -33,6 +35,16 @@ use crate::btf::{Array, DataSecEntry, FuncSecInfo, LineSecInfo}; const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE; +/// Features implements BPF and BTF feature detection +#[derive(Default, Debug)] +#[allow(missing_docs)] +pub struct Features { + pub bpf_name: bool, + pub bpf_probe_read_kernel: bool, + pub bpf_perf_link: bool, + pub btf: Option, +} + /// The loaded object file representation #[derive(Clone)] pub struct Object { @@ -52,14 +64,13 @@ pub struct Object { /// in [ProgramSection]s as keys. 
pub programs: HashMap, /// Functions - pub functions: HashMap, + pub functions: HashMap<(usize, u64), Function>, pub(crate) relocations: HashMap>, - pub(crate) symbols_by_index: HashMap, + pub(crate) symbol_table: HashMap, pub(crate) section_sizes: HashMap, // symbol_offset_by_name caches symbols that could be referenced from a // BTF VAR type so the offsets can be fixed up pub(crate) symbol_offset_by_name: HashMap, - pub(crate) text_section_index: Option, } /// An eBPF program @@ -524,7 +535,7 @@ impl Object { is_definition: symbol.is_definition(), kind: symbol.kind(), }; - bpf_obj.symbols_by_index.insert(symbol.index().0, sym); + bpf_obj.symbol_table.insert(symbol.index().0, sym); if symbol.is_global() || symbol.kind() == SymbolKind::Data { bpf_obj.symbol_offset_by_name.insert(name, symbol.address()); @@ -566,17 +577,16 @@ impl Object { programs: HashMap::new(), functions: HashMap::new(), relocations: HashMap::new(), - symbols_by_index: HashMap::new(), + symbol_table: HashMap::new(), section_sizes: HashMap::new(), symbol_offset_by_name: HashMap::new(), - text_section_index: None, } } /// Patches map data pub fn patch_map_data(&mut self, globals: HashMap<&str, &[u8]>) -> Result<(), ParseError> { let symbols: HashMap = self - .symbols_by_index + .symbol_table .iter() .filter(|(_, s)| s.name.is_some()) .map(|(_, s)| (s.name.as_ref().unwrap().clone(), s)) @@ -670,12 +680,10 @@ impl Object { }) } - fn parse_text_section(&mut self, mut section: Section) -> Result<(), ParseError> { - self.text_section_index = Some(section.index.0); - + fn parse_text_section(&mut self, section: Section) -> Result<(), ParseError> { let mut symbols_by_address = HashMap::new(); - for sym in self.symbols_by_index.values() { + for sym in self.symbol_table.values() { if sym.is_definition && sym.kind == SymbolKind::Text && sym.section_index == Some(section.index.0) @@ -731,7 +739,7 @@ impl Object { }; self.functions.insert( - sym.address, + (section.index.0, sym.address), Function { address, name: sym.name.clone().unwrap(), @@ -755,7 +763,7 @@ impl Object { section.index, section .relocations - .drain(..) 
+ .into_iter() .map(|rel| (rel.offset, rel)) .collect(), ); @@ -764,37 +772,6 @@ impl Object { Ok(()) } - fn parse_map_section( - &mut self, - section: &Section, - symbols: Vec, - ) -> Result<(), ParseError> { - if symbols.is_empty() { - return Err(ParseError::NoSymbolsInMapSection {}); - } - for (i, sym) in symbols.iter().enumerate() { - let start = sym.address as usize; - let end = start + sym.size as usize; - let data = §ion.data[start..end]; - let name = sym - .name - .as_ref() - .ok_or(ParseError::MapSymbolNameNotFound { i })?; - let def = parse_map_def(name, data)?; - self.maps.insert( - name.to_string(), - Map::Legacy(LegacyMap { - section_index: section.index.0, - symbol_index: sym.index, - def, - data: Vec::new(), - kind: MapKind::Other, - }), - ); - } - Ok(()) - } - fn parse_btf_maps( &mut self, section: &Section, @@ -827,7 +804,6 @@ impl Object { def, section_index: section.index.0, symbol_index, - kind: MapKind::Other, data: Vec::new(), }), ); @@ -838,7 +814,7 @@ impl Object { Ok(()) } - fn parse_section(&mut self, mut section: Section) -> Result<(), ParseError> { + fn parse_section(&mut self, section: Section) -> Result<(), ParseError> { let mut parts = section.name.rsplitn(2, '/').collect::>(); parts.reverse(); @@ -853,16 +829,16 @@ impl Object { self.section_sizes .insert(section.name.to_owned(), section.size); match section.kind { - BpfSectionKind::Data => { + BpfSectionKind::Data | BpfSectionKind::Rodata | BpfSectionKind::Bss => { self.maps - .insert(section.name.to_string(), parse_map(§ion, section.name)?); + .insert(section.name.to_string(), parse_data_map_section(§ion)?); } BpfSectionKind::Text => self.parse_text_section(section)?, BpfSectionKind::Btf => self.parse_btf(§ion)?, BpfSectionKind::BtfExt => self.parse_btf_ext(§ion)?, BpfSectionKind::BtfMaps => { let symbols: HashMap = self - .symbols_by_index + .symbol_table .values() .filter(|s| { if let Some(idx) = s.section_index { @@ -877,19 +853,24 @@ impl Object { self.parse_btf_maps(§ion, symbols)? } BpfSectionKind::Maps => { - let symbols: Vec = self - .symbols_by_index - .values() - .filter(|s| { - if let Some(idx) = s.section_index { - idx == section.index.0 - } else { - false - } - }) - .cloned() - .collect(); - self.parse_map_section(§ion, symbols)? + // take out self.maps so we can borrow the iterator below + // without cloning or collecting + let mut maps = mem::take(&mut self.maps); + + // extract the symbols for the .maps section, we'll need them + // during parsing + let symbols = self.symbol_table.values().filter(|s| { + s.section_index + .map(|idx| idx == section.index.0) + .unwrap_or(false) + }); + + let res = parse_maps_section(&mut maps, §ion, symbols); + + // put the maps back + self.maps = maps; + + res? } BpfSectionKind::Program => { let program = self.parse_program(§ion)?; @@ -900,7 +881,7 @@ impl Object { section.index, section .relocations - .drain(..) + .into_iter() .map(|rel| (rel.offset, rel)) .collect(), ); @@ -911,6 +892,91 @@ impl Object { Ok(()) } + + /// Sanitize BPF programs. 
+ pub fn sanitize_programs(&mut self, features: &Features) { + for program in self.programs.values_mut() { + program.sanitize(features); + } + } +} + +fn insn_is_helper_call(ins: &bpf_insn) -> bool { + let klass = (ins.code & 0x07) as u32; + let op = (ins.code & 0xF0) as u32; + let src = (ins.code & 0x08) as u32; + + klass == BPF_JMP && op == BPF_CALL && src == BPF_K && ins.src_reg() == 0 && ins.dst_reg() == 0 +} + +const BPF_FUNC_PROBE_READ: i32 = 4; +const BPF_FUNC_PROBE_READ_STR: i32 = 45; +const BPF_FUNC_PROBE_READ_USER: i32 = 112; +const BPF_FUNC_PROBE_READ_KERNEL: i32 = 113; +const BPF_FUNC_PROBE_READ_USER_STR: i32 = 114; +const BPF_FUNC_PROBE_READ_KERNEL_STR: i32 = 115; + +impl Program { + fn sanitize(&mut self, features: &Features) { + for inst in &mut self.function.instructions { + if !insn_is_helper_call(inst) { + continue; + } + + match inst.imm { + BPF_FUNC_PROBE_READ_USER | BPF_FUNC_PROBE_READ_KERNEL + if !features.bpf_probe_read_kernel => + { + inst.imm = BPF_FUNC_PROBE_READ; + } + BPF_FUNC_PROBE_READ_USER_STR | BPF_FUNC_PROBE_READ_KERNEL_STR + if !features.bpf_probe_read_kernel => + { + inst.imm = BPF_FUNC_PROBE_READ_STR; + } + _ => {} + } + } + } +} + +// Parses multiple map definition contained in a single `maps` section (which is +// different from `.maps` which is used for BTF). We can tell where each map is +// based on the symbol table. +fn parse_maps_section<'a, I: Iterator>( + maps: &mut HashMap, + section: &Section, + symbols: I, +) -> Result<(), ParseError> { + let mut have_symbols = false; + + // each symbol in the section is a separate map + for (i, sym) in symbols.enumerate() { + let start = sym.address as usize; + let end = start + sym.size as usize; + let data = §ion.data[start..end]; + let name = sym + .name + .as_ref() + .ok_or(ParseError::MapSymbolNameNotFound { i })?; + let def = parse_map_def(name, data)?; + maps.insert( + name.to_string(), + Map::Legacy(LegacyMap { + section_index: section.index.0, + section_kind: section.kind, + symbol_index: Some(sym.index), + def, + data: Vec::new(), + }), + ); + have_symbols = true; + } + if !have_symbols { + return Err(ParseError::NoSymbolsForMapsSection); + } + + Ok(()) } /// Errors caught during parsing the object file @@ -976,25 +1042,40 @@ pub enum ParseError { #[error("the map number {i} in the `maps` section doesn't have a symbol name")] MapSymbolNameNotFound { i: usize }, - #[error("no symbols found for the maps included in the maps section")] - NoSymbolsInMapSection {}, + #[error("no symbols for `maps` section, can't parse maps")] + NoSymbolsForMapsSection, /// No BTF parsed for object #[error("no BTF parsed for object")] NoBTF, } -#[derive(Debug)] -enum BpfSectionKind { +/// The kind of an ELF section. 
+#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum BpfSectionKind { + /// Undefined Undefined, + /// `maps` Maps, + /// `.maps` BtfMaps, + /// A program section Program, + /// `.data` Data, + /// `.rodata` + Rodata, + /// `.bss` + Bss, + /// `.text` Text, + /// `.BTF` Btf, + /// `.BTF.ext` BtfExt, + /// `license` License, + /// `version` Version, } @@ -1072,6 +1153,7 @@ impl<'data, 'file, 'a> TryFrom<&'a ObjSection<'data, 'file>> for Section<'a> { _ => return Err(ParseError::UnsupportedRelocationTarget), }, offset, + size: r.size(), }) }) .collect::, _>>()?, @@ -1161,10 +1243,11 @@ impl From for u32 { } } -fn parse_map(section: &Section, name: &str) -> Result { - let kind = MapKind::from(name); - let (def, data) = match kind { - MapKind::Bss | MapKind::Data | MapKind::Rodata => { +// Parsed '.bss' '.data' and '.rodata' sections. These sections are arrays of +// bytes and are relocated based on their section index. +fn parse_data_map_section(section: &Section) -> Result { + let (def, data) = match section.kind { + BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata => { let def = bpf_map_def { map_type: BPF_MAP_TYPE_ARRAY as u32, key_size: mem::size_of::() as u32, @@ -1172,7 +1255,7 @@ fn parse_map(section: &Section, name: &str) -> Result { // .bss will always have data.len() == 0 value_size: section.size as u32, max_entries: 1, - map_flags: if kind == MapKind::Rodata { + map_flags: if section.kind == BpfSectionKind::Rodata { BPF_F_RDONLY_PROG } else { 0 @@ -1181,14 +1264,15 @@ fn parse_map(section: &Section, name: &str) -> Result { }; (def, section.data.to_vec()) } - MapKind::Other => (parse_map_def(name, section.data)?, Vec::new()), + _ => unreachable!(), }; Ok(Map::Legacy(LegacyMap { section_index: section.index.0, - symbol_index: 0, + section_kind: section.kind, + // Data maps don't require symbols to be relocated + symbol_index: None, def, data, - kind, })) } @@ -1308,8 +1392,6 @@ pub fn parse_map_info(info: bpf_map_info, pinned: PinningType) -> Map { section_index: 0, symbol_index: 0, data: Vec::new(), - // We should never be loading the .bss or .data or .rodata FDs - kind: MapKind::Other, }) } else { Map::Legacy(LegacyMap { @@ -1323,10 +1405,9 @@ pub fn parse_map_info(info: bpf_map_info, pinned: PinningType) -> Map { id: info.id, }, section_index: 0, - symbol_index: 0, + symbol_index: None, + section_kind: BpfSectionKind::Undefined, data: Vec::new(), - // We should never be loading the .bss or .data or .rodata FDs - kind: MapKind::Other, }) } } @@ -1375,8 +1456,8 @@ mod tests { } fn fake_sym(obj: &mut Object, section_index: usize, address: u64, name: &str, size: u64) { - let idx = obj.symbols_by_index.len(); - obj.symbols_by_index.insert( + let idx = obj.symbol_table.len(); + obj.symbol_table.insert( idx + 1, Symbol { index: idx + 1, @@ -1512,65 +1593,21 @@ mod tests { assert_eq!(parse_map_def("foo", &buf).unwrap(), def); } - #[test] - fn test_parse_map_error() { - assert!(matches!( - parse_map(&fake_section(BpfSectionKind::Maps, "maps/foo", &[]), "foo",), - Err(ParseError::InvalidMapDefinition { .. 
}) - )); - } - - #[test] - fn test_parse_map() { - assert!(matches!( - parse_map( - &fake_section( - BpfSectionKind::Maps, - "maps/foo", - bytes_of(&bpf_map_def { - map_type: 1, - key_size: 2, - value_size: 3, - max_entries: 4, - map_flags: 5, - id: 0, - pinning: PinningType::None, - }) - ), - "foo" - ), - Ok(Map::Legacy(LegacyMap{ - section_index: 0, - def: bpf_map_def { - map_type: 1, - key_size: 2, - value_size: 3, - max_entries: 4, - map_flags: 5, - id: 0, - pinning: PinningType::None, - }, - data, - .. - })) if data.is_empty() - )) - } - #[test] fn test_parse_map_data() { let map_data = b"map data"; assert!(matches!( - parse_map( + parse_data_map_section( &fake_section( BpfSectionKind::Data, ".bss", map_data, ), - ".bss" ), Ok(Map::Legacy(LegacyMap { section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Data, + symbol_index: None, def: bpf_map_def { map_type: _map_type, key_size: 4, @@ -1581,8 +1618,7 @@ mod tests { pinning: PinningType::None, }, data, - kind - })) if data == map_data && value_size == map_data.len() as u32 && kind == MapKind::Bss + })) if data == map_data && value_size == map_data.len() as u32 )) } @@ -2253,12 +2289,12 @@ mod tests { pinning: PinningType::None, }, section_index: 1, - symbol_index: 1, + section_kind: BpfSectionKind::Rodata, + symbol_index: Some(1), data: vec![0, 0, 0], - kind: MapKind::Rodata, }), ); - obj.symbols_by_index.insert( + obj.symbol_table.insert( 1, Symbol { index: 1, diff --git a/aya-obj/src/relocation.rs b/aya-obj/src/relocation.rs index 8b0b9170..c2f2096d 100644 --- a/aya-obj/src/relocation.rs +++ b/aya-obj/src/relocation.rs @@ -1,6 +1,7 @@ //! Program relocation handling. use core::mem; +use std::collections::HashSet; use alloc::{borrow::ToOwned, string::String}; use log::debug; @@ -15,6 +16,7 @@ use crate::{ obj::{Function, Object, Program}, thiserror::{self, Error}, util::HashMap, + BpfSectionKind, }; pub(crate) const INS_SIZE: usize = mem::size_of::(); @@ -84,6 +86,7 @@ pub enum RelocationError { pub(crate) struct Relocation { // byte offset of the instruction to be relocated pub(crate) offset: u64, + pub(crate) size: u8, // index of the symbol to relocate to pub(crate) symbol_index: usize, } @@ -104,12 +107,15 @@ impl Object { pub fn relocate_maps<'a, I: Iterator, &'a Map)>>( &mut self, maps: I, + text_sections: &HashSet, ) -> Result<(), BpfRelocationError> { let mut maps_by_section = HashMap::new(); let mut maps_by_symbol = HashMap::new(); for (name, fd, map) in maps { maps_by_section.insert(map.section_index(), (name, fd, map)); - maps_by_symbol.insert(map.symbol_index(), (name, fd, map)); + if let Some(index) = map.symbol_index() { + maps_by_symbol.insert(index, (name, fd, map)); + } } let functions = self @@ -125,8 +131,8 @@ impl Object { relocations.values(), &maps_by_section, &maps_by_symbol, - &self.symbols_by_index, - self.text_section_index, + &self.symbol_table, + text_sections, ) .map_err(|error| BpfRelocationError { function: function.name.clone(), @@ -139,13 +145,16 @@ impl Object { } /// Relocates function calls - pub fn relocate_calls(&mut self) -> Result<(), BpfRelocationError> { + pub fn relocate_calls( + &mut self, + text_sections: &HashSet, + ) -> Result<(), BpfRelocationError> { for (name, program) in self.programs.iter_mut() { let linker = FunctionLinker::new( - self.text_section_index, &self.functions, &self.relocations, - &self.symbols_by_index, + &self.symbol_table, + text_sections, ); linker.link(program).map_err(|error| BpfRelocationError { function: name.to_owned(), @@ -163,7 +172,7 @@ fn 
relocate_maps<'a, I: Iterator>( maps_by_section: &HashMap, &Map)>, maps_by_symbol: &HashMap, &Map)>, symbol_table: &HashMap, - text_section_index: Option, + text_sections: &HashSet, ) -> Result<(), RelocationError> { let section_offset = fun.section_offset; let instructions = &mut fun.instructions; @@ -193,34 +202,54 @@ fn relocate_maps<'a, I: Iterator>( index: rel.symbol_index, })?; - let section_index = match sym.section_index { - Some(index) => index, + let Some(section_index) = sym.section_index else { // this is not a map relocation - None => continue, + continue; }; // calls and relocation to .text symbols are handled in a separate step - if insn_is_call(&instructions[ins_index]) || sym.section_index == text_section_index { + if insn_is_call(&instructions[ins_index]) || text_sections.contains(§ion_index) { continue; } - let (name, fd, map) = if maps_by_symbol.contains_key(&rel.symbol_index) { - maps_by_symbol - .get(&rel.symbol_index) - .ok_or(RelocationError::SectionNotFound { - symbol_index: rel.symbol_index, - symbol_name: sym.name.clone(), - section_index, - })? + let (name, fd, map) = if let Some(m) = maps_by_symbol.get(&rel.symbol_index) { + let map = &m.2; + debug!( + "relocating map by symbol index {:?}, kind {:?} at insn {ins_index} in section {}", + map.symbol_index(), + map.section_kind(), + fun.section_index.0 + ); + debug_assert_eq!(map.symbol_index().unwrap(), rel.symbol_index); + m } else { - maps_by_section - .get(§ion_index) - .ok_or(RelocationError::SectionNotFound { + let Some(m) = maps_by_section.get(§ion_index) else { + debug!( + "failed relocating map by section index {}", + section_index + ); + return Err(RelocationError::SectionNotFound { symbol_index: rel.symbol_index, symbol_name: sym.name.clone(), section_index, - })? + }); + }; + let map = &m.2; + debug!( + "relocating map by section index {}, kind {:?} at insn {ins_index} in section {}", + map.section_index(), + map.section_kind(), + fun.section_index.0, + ); + + debug_assert_eq!(map.symbol_index(), None); + debug_assert!(matches!( + map.section_kind(), + BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata + ),); + m }; + debug_assert_eq!(map.section_index(), section_index); let map_fd = fd.ok_or_else(|| RelocationError::MapNotCreated { name: (*name).into(), @@ -240,26 +269,26 @@ fn relocate_maps<'a, I: Iterator>( } struct FunctionLinker<'a> { - text_section_index: Option, - functions: &'a HashMap, + functions: &'a HashMap<(usize, u64), Function>, linked_functions: HashMap, relocations: &'a HashMap>, symbol_table: &'a HashMap, + text_sections: &'a HashSet, } impl<'a> FunctionLinker<'a> { fn new( - text_section_index: Option, - functions: &'a HashMap, + functions: &'a HashMap<(usize, u64), Function>, relocations: &'a HashMap>, symbol_table: &'a HashMap, + text_sections: &'a HashSet, ) -> FunctionLinker<'a> { FunctionLinker { - text_section_index, functions, linked_functions: HashMap::new(), relocations, symbol_table, + text_sections, } } @@ -289,6 +318,10 @@ impl<'a> FunctionLinker<'a> { // at `start_ins`. We'll use `start_ins` to do pc-relative calls. 
let start_ins = program.instructions.len(); program.instructions.extend(&fun.instructions); + debug!( + "linked function `{}` at instruction {}", + fun.name, start_ins + ); // link func and line info into the main program // the offset needs to be adjusted @@ -305,101 +338,110 @@ impl<'a> FunctionLinker<'a> { fn relocate(&mut self, program: &mut Function, fun: &Function) -> Result<(), RelocationError> { let relocations = self.relocations.get(&fun.section_index); - debug!("relocating program {} function {}", program.name, fun.name); - let n_instructions = fun.instructions.len(); let start_ins = program.instructions.len() - n_instructions; + debug!( + "relocating program `{}` function `{}` size {}", + program.name, fun.name, n_instructions + ); + // process all the instructions. We can't only loop over relocations since we need to // patch pc-relative calls too. for ins_index in start_ins..start_ins + n_instructions { let ins = program.instructions[ins_index]; let is_call = insn_is_call(&ins); - // only resolve relocations for calls or for instructions that - // reference symbols in the .text section (eg let callback = - // &some_fun) - let rel = if let Some(relocations) = relocations { - self.text_relocation_info( - relocations, - (fun.section_offset + (ins_index - start_ins) * INS_SIZE) as u64, - )? - // if not a call and not a .text reference, ignore the - // relocation (see relocate_maps()) - .and_then(|(_, sym)| { - if is_call { - return Some(sym.address); - } - - match sym.kind { - SymbolKind::Text => Some(sym.address), - SymbolKind::Section if sym.section_index == self.text_section_index => { - Some(sym.address + ins.imm as u64) - } - _ => None, - } + let rel = relocations + .and_then(|relocations| { + relocations + .get(&((fun.section_offset + (ins_index - start_ins) * INS_SIZE) as u64)) }) - } else { - None - }; - - // some_fun() or let x = &some_fun trigger linking, everything else - // can be ignored here + .and_then(|rel| { + // get the symbol for the relocation + self.symbol_table + .get(&rel.symbol_index) + .map(|sym| (rel, sym)) + }) + .filter(|(_rel, sym)| { + // only consider text relocations, data relocations are + // relocated in relocate_maps() + sym.kind == SymbolKind::Text + || sym + .section_index + .map(|section_index| self.text_sections.contains(§ion_index)) + .unwrap_or(false) + }); + + // not a call and not a text relocation, we don't need to do anything if !is_call && rel.is_none() { continue; } - let callee_address = if let Some(address) = rel { - // We have a relocation entry for the instruction at `ins_index`, the address of - // the callee is the address of the relocation's target symbol. - address + let (callee_section_index, callee_address) = if let Some((rel, sym)) = rel { + let address = match sym.kind { + SymbolKind::Text => sym.address, + // R_BPF_64_32 this is a call + SymbolKind::Section if rel.size == 32 => { + sym.address + (ins.imm + 1) as u64 * INS_SIZE as u64 + } + // R_BPF_64_64 this is a ld_imm64 text relocation + SymbolKind::Section if rel.size == 64 => sym.address + ins.imm as u64, + _ => todo!(), // FIXME: return an error here, + }; + (sym.section_index.unwrap(), address) } else { // The caller and the callee are in the same ELF section and this is a pc-relative // call. Resolve the pc-relative imm to an absolute address. 
let ins_size = INS_SIZE as i64; - (fun.section_offset as i64 - + ((ins_index - start_ins) as i64) * ins_size - + (ins.imm + 1) as i64 * ins_size) as u64 + ( + fun.section_index.0, + (fun.section_offset as i64 + + ((ins_index - start_ins) as i64) * ins_size + + (ins.imm + 1) as i64 * ins_size) as u64, + ) }; debug!( - "relocating {} to callee address {} ({})", + "relocating {} to callee address {:#x} in section {} ({}) at instruction {ins_index}", if is_call { "call" } else { "reference" }, callee_address, + callee_section_index, if rel.is_some() { "relocation" } else { - "relative" + "pc-relative" }, ); // lookup and link the callee if it hasn't been linked already. `callee_ins_index` will // contain the instruction index of the callee inside the program. - let callee = - self.functions - .get(&callee_address) - .ok_or(RelocationError::UnknownFunction { - address: callee_address, - caller_name: fun.name.clone(), - })?; + let callee = self + .functions + .get(&(callee_section_index, callee_address)) + .ok_or(RelocationError::UnknownFunction { + address: callee_address, + caller_name: fun.name.clone(), + })?; - debug!("callee is {}", callee.name); + debug!("callee is `{}`", callee.name); - let callee_ins_index = self.link_function(program, callee)?; + let callee_ins_index = self.link_function(program, callee)? as i32; let mut ins = &mut program.instructions[ins_index]; - ins.imm = if callee_ins_index < ins_index { - -((ins_index - callee_ins_index + 1) as i32) - } else { - (callee_ins_index - ins_index - 1) as i32 - }; + let ins_index = ins_index as i32; + ins.imm = callee_ins_index - ins_index - 1; + debug!( + "callee `{}` is at ins {callee_ins_index}, {} from current instruction {ins_index}", + callee.name, ins.imm + ); if !is_call { ins.set_src_reg(BPF_PSEUDO_FUNC as u8); } } debug!( - "finished relocating program {} function {}", + "finished relocating program `{}` function `{}`", program.name, fun.name ); @@ -438,25 +480,6 @@ impl<'a> FunctionLinker<'a> { } Ok(()) } - - fn text_relocation_info( - &self, - relocations: &HashMap, - offset: u64, - ) -> Result, RelocationError> { - if let Some(rel) = relocations.get(&offset) { - let sym = - self.symbol_table - .get(&rel.symbol_index) - .ok_or(RelocationError::UnknownSymbol { - index: rel.symbol_index, - })?; - - Ok(Some((*rel, sym.clone()))) - } else { - Ok(None) - } - } } fn insn_is_call(ins: &bpf_insn) -> bool { @@ -476,7 +499,10 @@ fn insn_is_call(ins: &bpf_insn) -> bool { mod test { use alloc::{string::ToString, vec, vec::Vec}; - use crate::maps::{bpf_map_def, BtfMap, BtfMapDef, LegacyMap, Map, MapKind}; + use crate::{ + maps::{BtfMap, LegacyMap, Map}, + BpfSectionKind, + }; use super::*; @@ -498,25 +524,20 @@ mod test { fn fake_legacy_map(symbol_index: usize) -> Map { Map::Legacy(LegacyMap { - def: bpf_map_def { - ..Default::default() - }, + def: Default::default(), section_index: 0, - symbol_index, + section_kind: BpfSectionKind::Undefined, + symbol_index: Some(symbol_index), data: Vec::new(), - kind: MapKind::Other, }) } fn fake_btf_map(symbol_index: usize) -> Map { Map::Btf(BtfMap { - def: BtfMapDef { - ..Default::default() - }, + def: Default::default(), section_index: 0, symbol_index, data: Vec::new(), - kind: MapKind::Other, }) } @@ -549,6 +570,7 @@ mod test { let relocations = vec![Relocation { offset: 0x0, symbol_index: 1, + size: 64, }]; let maps_by_section = HashMap::new(); @@ -561,7 +583,7 @@ mod test { &maps_by_section, &maps_by_symbol, &symbol_table, - None, + &HashSet::new(), ) .unwrap(); @@ -596,10 +618,12 @@ mod test { 
Relocation { offset: 0x0, symbol_index: 1, + size: 64, }, Relocation { offset: mem::size_of::() as u64, symbol_index: 2, + size: 64, }, ]; let maps_by_section = HashMap::new(); @@ -617,7 +641,7 @@ mod test { &maps_by_section, &maps_by_symbol, &symbol_table, - None, + &HashSet::new(), ) .unwrap(); @@ -646,6 +670,7 @@ mod test { let relocations = vec![Relocation { offset: 0x0, symbol_index: 1, + size: 64, }]; let maps_by_section = HashMap::new(); @@ -658,7 +683,7 @@ mod test { &maps_by_section, &maps_by_symbol, &symbol_table, - None, + &HashSet::new(), ) .unwrap(); @@ -693,10 +718,12 @@ mod test { Relocation { offset: 0x0, symbol_index: 1, + size: 64, }, Relocation { offset: mem::size_of::() as u64, symbol_index: 2, + size: 64, }, ]; let maps_by_section = HashMap::new(); @@ -714,7 +741,7 @@ mod test { &maps_by_section, &maps_by_symbol, &symbol_table, - None, + &HashSet::new(), ) .unwrap(); diff --git a/aya/src/bpf.rs b/aya/src/bpf.rs index 0e74d160..859ee16d 100644 --- a/aya/src/bpf.rs +++ b/aya/src/bpf.rs @@ -11,6 +11,7 @@ use aya_obj::{ btf::{BtfFeatures, BtfRelocationError}, generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS}, relocation::BpfRelocationError, + BpfSectionKind, Features, }; use log::debug; use thiserror::Error; @@ -23,7 +24,6 @@ use crate::{ maps::{Map, MapData, MapError}, obj::{ btf::{Btf, BtfError}, - maps::MapKind, Object, ParseError, ProgramSection, }, programs::{ @@ -36,7 +36,7 @@ use crate::{ bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_btf_datasec_supported, is_btf_decl_tag_supported, is_btf_float_supported, is_btf_func_global_supported, is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported, - is_prog_name_supported, retry_with_verifier_logs, + is_probe_read_kernel_supported, is_prog_name_supported, retry_with_verifier_logs, }, util::{bytes_of, bytes_of_slice, possible_cpus, VerifierLog, POSSIBLE_CPUS}, }; @@ -66,39 +66,30 @@ unsafe impl Pod for [T; N] {} pub use aya_obj::maps::{bpf_map_def, PinningType}; lazy_static! { - pub(crate) static ref FEATURES: Features = Features::new(); + pub(crate) static ref FEATURES: Features = detect_features(); } -// Features implements BPF and BTF feature detection -#[derive(Default, Debug)] -pub(crate) struct Features { - pub bpf_name: bool, - pub bpf_perf_link: bool, - pub btf: Option, -} - -impl Features { - fn new() -> Self { - let btf = if is_btf_supported() { - Some(BtfFeatures { - btf_func: is_btf_func_supported(), - btf_func_global: is_btf_func_global_supported(), - btf_datasec: is_btf_datasec_supported(), - btf_float: is_btf_float_supported(), - btf_decl_tag: is_btf_decl_tag_supported(), - btf_type_tag: is_btf_type_tag_supported(), - }) - } else { - None - }; - let f = Features { - bpf_name: is_prog_name_supported(), - bpf_perf_link: is_perf_link_supported(), - btf, - }; - debug!("BPF Feature Detection: {:#?}", f); - f - } +fn detect_features() -> Features { + let btf = if is_btf_supported() { + Some(BtfFeatures { + btf_func: is_btf_func_supported(), + btf_func_global: is_btf_func_global_supported(), + btf_datasec: is_btf_datasec_supported(), + btf_float: is_btf_float_supported(), + btf_decl_tag: is_btf_decl_tag_supported(), + btf_type_tag: is_btf_type_tag_supported(), + }) + } else { + None + }; + let f = Features { + bpf_name: is_prog_name_supported(), + bpf_probe_read_kernel: is_probe_read_kernel_supported(), + bpf_perf_link: is_perf_link_supported(), + btf, + }; + debug!("BPF Feature Detection: {:#?}", f); + f } /// Builder style API for advanced loading of eBPF programs. 
@@ -415,14 +406,14 @@ impl<'a> BpfLoader<'a> { } PinningType::None => map.create(&name)?, }; - if !map.obj.data().is_empty() && map.obj.kind() != MapKind::Bss { + if !map.obj.data().is_empty() && map.obj.section_kind() != BpfSectionKind::Bss { bpf_map_update_elem_ptr(fd, &0 as *const _, map.obj.data_mut().as_mut_ptr(), 0) .map_err(|(_, io_error)| MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), io_error, })?; } - if map.obj.kind() == MapKind::Rodata { + if map.obj.section_kind() == BpfSectionKind::Rodata { bpf_map_freeze(fd).map_err(|(_, io_error)| MapError::SyscallError { call: "bpf_map_freeze".to_owned(), io_error, @@ -431,11 +422,19 @@ impl<'a> BpfLoader<'a> { maps.insert(name, map); } + let text_sections = obj + .functions + .keys() + .map(|(section_index, _)| *section_index) + .collect(); + obj.relocate_maps( maps.iter() .map(|(s, data)| (s.as_str(), data.fd, &data.obj)), + &text_sections, )?; - obj.relocate_calls()?; + obj.relocate_calls(&text_sections)?; + obj.sanitize_programs(&FEATURES); let programs = obj .programs diff --git a/aya/src/maps/array/array.rs b/aya/src/maps/array/array.rs index 9344f192..71ac1223 100644 --- a/aya/src/maps/array/array.rs +++ b/aya/src/maps/array/array.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -35,9 +34,9 @@ pub struct Array { _v: PhantomData, } -impl, V: Pod> Array { +impl, V: Pod> Array { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -52,7 +51,7 @@ impl, V: Pod> Array { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn len(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } /// Returns the value stored at the given index. @@ -62,7 +61,7 @@ impl, V: Pod> Array { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_lookup_elem` fails. pub fn get(&self, index: &u32, flags: u64) -> Result { - let data = self.inner.as_ref(); + let data = self.inner.borrow(); check_bounds(data, *index)?; let fd = data.fd_or_err()?; @@ -82,7 +81,7 @@ impl, V: Pod> Array { } } -impl, V: Pod> Array { +impl, V: Pod> Array { /// Sets the value of the element at the given index. /// /// # Errors @@ -90,7 +89,7 @@ impl, V: Pod> Array { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_update_elem` fails. 
pub fn set(&mut self, index: u32, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| { @@ -103,9 +102,9 @@ impl, V: Pod> Array { } } -impl, V: Pod> IterableMap for Array { +impl, V: Pod> IterableMap for Array { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result { diff --git a/aya/src/maps/array/per_cpu_array.rs b/aya/src/maps/array/per_cpu_array.rs index a3b5eab9..e83f59e5 100644 --- a/aya/src/maps/array/per_cpu_array.rs +++ b/aya/src/maps/array/per_cpu_array.rs @@ -1,5 +1,5 @@ use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -53,9 +53,9 @@ pub struct PerCpuArray { _v: PhantomData, } -impl, V: Pod> PerCpuArray { +impl, V: Pod> PerCpuArray { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -70,7 +70,7 @@ impl, V: Pod> PerCpuArray { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn len(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } /// Returns a slice of values - one for each CPU - stored at the given index. @@ -80,7 +80,7 @@ impl, V: Pod> PerCpuArray { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_lookup_elem` fails. pub fn get(&self, index: &u32, flags: u64) -> Result, MapError> { - let data = self.inner.as_ref(); + let data = self.inner.borrow(); check_bounds(data, *index)?; let fd = data.fd_or_err()?; @@ -100,7 +100,7 @@ impl, V: Pod> PerCpuArray { } } -impl, V: Pod> PerCpuArray { +impl, V: Pod> PerCpuArray { /// Sets the values - one for each CPU - at the given index. /// /// # Errors @@ -108,7 +108,7 @@ impl, V: Pod> PerCpuArray { /// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`] /// if `bpf_map_update_elem` fails. pub fn set(&mut self, index: u32, values: PerCpuValues, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; @@ -122,9 +122,9 @@ impl, V: Pod> PerCpuArray { } } -impl, V: Pod> IterableMap> for PerCpuArray { +impl, V: Pod> IterableMap> for PerCpuArray { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result, MapError> { diff --git a/aya/src/maps/array/program_array.rs b/aya/src/maps/array/program_array.rs index 27566e28..b8a57cf5 100644 --- a/aya/src/maps/array/program_array.rs +++ b/aya/src/maps/array/program_array.rs @@ -1,7 +1,7 @@ //! An array of eBPF program file descriptors used as a jump table. use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, os::unix::prelude::{AsRawFd, RawFd}, }; @@ -51,9 +51,9 @@ pub struct ProgramArray { inner: T, } -impl> ProgramArray { +impl> ProgramArray { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -64,17 +64,17 @@ impl> ProgramArray { /// An iterator over the indices of the array that point to a program. The iterator item type /// is `Result`. 
pub fn indices(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl> ProgramArray { +impl> ProgramArray { /// Sets the target program file descriptor for the given index in the jump table. /// /// When an eBPF program calls `bpf_tail_call(ctx, prog_array, index)`, control /// flow will jump to `program`. pub fn set(&mut self, index: u32, program: ProgramFd, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, index)?; let fd = data.fd_or_err()?; let prog_fd = program.as_raw_fd(); @@ -93,9 +93,9 @@ impl> ProgramArray { /// Calling `bpf_tail_call(ctx, prog_array, index)` on an index that has been cleared returns an /// error. pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); check_bounds(data, *index)?; - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow_mut().fd_or_err()?; bpf_map_delete_elem(fd, index) .map(|_| ()) diff --git a/aya/src/maps/bloom_filter.rs b/aya/src/maps/bloom_filter.rs index 16eb1aa3..f7f4be02 100644 --- a/aya/src/maps/bloom_filter.rs +++ b/aya/src/maps/bloom_filter.rs @@ -1,5 +1,5 @@ //! A Bloom Filter. -use std::{borrow::Borrow, convert::AsRef, marker::PhantomData}; +use std::{borrow::Borrow, marker::PhantomData}; use crate::{ maps::{check_v_size, MapData, MapError}, @@ -35,9 +35,9 @@ pub struct BloomFilter { _v: PhantomData, } -impl, V: Pod> BloomFilter { +impl, V: Pod> BloomFilter { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_v_size::(data)?; let _ = data.fd_or_err()?; @@ -50,7 +50,7 @@ impl, V: Pod> BloomFilter { /// Query the existence of the element. pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_lookup_elem_ptr::(fd, None, &mut value, flags) .map_err(|(_, io_error)| MapError::SyscallError { @@ -63,7 +63,7 @@ impl, V: Pod> BloomFilter { /// Inserts a value into the map. 
pub fn insert(&self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_push_elem".to_owned(), @@ -84,10 +84,7 @@ mod tests { bpf_map_type::{BPF_MAP_TYPE_BLOOM_FILTER, BPF_MAP_TYPE_PERF_EVENT_ARRAY}, }, maps::{Map, MapData}, - obj::{ - self, - maps::{LegacyMap, MapKind}, - }, + obj::{self, maps::LegacyMap, BpfSectionKind}, sys::{override_syscall, SysResult, Syscall}, }; use libc::{EFAULT, ENOENT}; @@ -103,9 +100,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, data: Vec::new(), - kind: MapKind::Other, }) } @@ -142,9 +139,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, data: Vec::new(), - kind: MapKind::Other, }), fd: None, pinned: false, diff --git a/aya/src/maps/hash_map/hash_map.rs b/aya/src/maps/hash_map/hash_map.rs index a67cdead..26a4af0f 100644 --- a/aya/src/maps/hash_map/hash_map.rs +++ b/aya/src/maps/hash_map/hash_map.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -39,9 +38,9 @@ pub struct HashMap { _v: PhantomData, } -impl, K: Pod, V: Pod> HashMap { +impl, K: Pod, V: Pod> HashMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -54,7 +53,7 @@ impl, K: Pod, V: Pod> HashMap { /// Returns a copy of the value associated with the key. pub fn get(&self, key: &K, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -73,11 +72,11 @@ impl, K: Pod, V: Pod> HashMap { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl, K: Pod, V: Pod> HashMap { +impl, K: Pod, V: Pod> HashMap { /// Inserts a key-value pair into the map. pub fn insert( &mut self, @@ -85,18 +84,18 @@ impl, K: Pod, V: Pod> HashMap { value: impl Borrow, flags: u64, ) -> Result<(), MapError> { - hash_map::insert(self.inner.as_mut(), key.borrow(), value.borrow(), flags) + hash_map::insert(self.inner.borrow_mut(), key.borrow(), value.borrow(), flags) } /// Removes a key from the map. 
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod, V: Pod> IterableMap for HashMap { +impl, K: Pod, V: Pod> IterableMap for HashMap { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result { @@ -117,10 +116,7 @@ mod tests { bpf_map_type::{BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_LRU_HASH}, }, maps::{Map, MapData}, - obj::{ - self, - maps::{LegacyMap, MapKind}, - }, + obj::{self, maps::LegacyMap, BpfSectionKind}, sys::{override_syscall, SysResult, Syscall}, }; @@ -136,9 +132,9 @@ mod tests { ..Default::default() }, section_index: 0, + section_kind: BpfSectionKind::Maps, data: Vec::new(), - kind: MapKind::Other, - symbol_index: 0, + symbol_index: None, }) } @@ -267,9 +263,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, data: Vec::new(), - kind: MapKind::Other, }), fd: Some(42), pinned: false, diff --git a/aya/src/maps/hash_map/per_cpu_hash_map.rs b/aya/src/maps/hash_map/per_cpu_hash_map.rs index eb7fe6ae..eea8ed01 100644 --- a/aya/src/maps/hash_map/per_cpu_hash_map.rs +++ b/aya/src/maps/hash_map/per_cpu_hash_map.rs @@ -1,7 +1,6 @@ //! Per-CPU hash map. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -48,9 +47,9 @@ pub struct PerCpuHashMap { _v: PhantomData, } -impl, K: Pod, V: Pod> PerCpuHashMap { +impl, K: Pod, V: Pod> PerCpuHashMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -64,7 +63,7 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// Returns a slice of values - one for each CPU - associated with the key. pub fn get(&self, key: &K, flags: u64) -> Result, MapError> { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let values = bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -83,11 +82,11 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl, K: Pod, V: Pod> PerCpuHashMap { +impl, K: Pod, V: Pod> PerCpuHashMap { /// Inserts a slice of values - one for each CPU - for the given key. /// /// # Examples @@ -122,7 +121,7 @@ impl, K: Pod, V: Pod> PerCpuHashMap { values: PerCpuValues, flags: u64, ) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow_mut().fd_or_err()?; bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err( |(_, io_error)| MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), @@ -135,13 +134,15 @@ impl, K: Pod, V: Pod> PerCpuHashMap { /// Removes a key from the map. 
pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod, V: Pod> IterableMap> for PerCpuHashMap { +impl, K: Pod, V: Pod> IterableMap> + for PerCpuHashMap +{ fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result, MapError> { diff --git a/aya/src/maps/lpm_trie.rs b/aya/src/maps/lpm_trie.rs index 4a6cc94f..95894a65 100644 --- a/aya/src/maps/lpm_trie.rs +++ b/aya/src/maps/lpm_trie.rs @@ -1,7 +1,6 @@ //! A LPM Trie. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -99,9 +98,9 @@ impl Clone for Key { // A Pod impl is required as Key struct is a key for a map. unsafe impl Pod for Key {} -impl, K: Pod, V: Pod> LpmTrie { +impl, K: Pod, V: Pod> LpmTrie { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::, V>(data)?; let _ = data.fd_or_err()?; @@ -115,7 +114,7 @@ impl, K: Pod, V: Pod> LpmTrie { /// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie. pub fn get(&self, key: &Key, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -134,17 +133,17 @@ impl, K: Pod, V: Pod> LpmTrie { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result, MapError>`. pub fn keys(&self) -> MapKeys<'_, Key> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// An iterator visiting all keys matching key. The /// iterator item type is `Result, MapError>`. pub fn iter_key(&self, key: Key) -> LpmTrieKeys<'_, K> { - LpmTrieKeys::new(self.inner.as_ref(), key) + LpmTrieKeys::new(self.inner.borrow(), key) } } -impl, K: Pod, V: Pod> LpmTrie { +impl, K: Pod, V: Pod> LpmTrie { /// Inserts a key value pair into the map. pub fn insert( &mut self, @@ -152,7 +151,7 @@ impl, K: Pod, V: Pod> LpmTrie { value: impl Borrow, flags: u64, ) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), @@ -167,7 +166,7 @@ impl, K: Pod, V: Pod> LpmTrie { /// /// Both the prefix and data must match exactly - this method does not do a longest prefix match. 
pub fn remove(&mut self, key: &Key) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_delete_elem(fd, key) .map(|_| ()) .map_err(|(_, io_error)| MapError::SyscallError { @@ -177,9 +176,9 @@ impl, K: Pod, V: Pod> LpmTrie { } } -impl, K: Pod, V: Pod> IterableMap, V> for LpmTrie { +impl, K: Pod, V: Pod> IterableMap, V> for LpmTrie { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &Key) -> Result { @@ -247,10 +246,7 @@ mod tests { bpf_map_type::{BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_PERF_EVENT_ARRAY}, }, maps::{Map, MapData}, - obj::{ - self, - maps::{LegacyMap, MapKind}, - }, + obj::{self, maps::LegacyMap, BpfSectionKind}, sys::{override_syscall, SysResult, Syscall}, }; use libc::{EFAULT, ENOENT}; @@ -266,9 +262,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, data: Vec::new(), - kind: MapKind::Other, }) } @@ -322,9 +318,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: None, data: Vec::new(), - kind: MapKind::Other, }), fd: None, btf_fd: None, diff --git a/aya/src/maps/mod.rs b/aya/src/maps/mod.rs index 505adafd..fa27c053 100644 --- a/aya/src/maps/mod.rs +++ b/aya/src/maps/mod.rs @@ -37,7 +37,6 @@ //! versa. Because of that, all map values must be plain old data and therefore //! implement the [Pod] trait. use std::{ - convert::{AsMut, AsRef}, ffi::CString, fmt, io, marker::PhantomData, @@ -481,18 +480,6 @@ pub struct MapData { pub pinned: bool, } -impl AsRef for MapData { - fn as_ref(&self) -> &MapData { - self - } -} - -impl AsMut for MapData { - fn as_mut(&mut self) -> &mut MapData { - self - } -} - impl MapData { /// Creates a new map with the provided `name` pub fn create(&mut self, name: &str) -> Result { @@ -845,7 +832,7 @@ mod tests { bpf_map_def, generated::{bpf_cmd, bpf_map_type::BPF_MAP_TYPE_HASH}, maps::MapData, - obj::maps::{LegacyMap, MapKind}, + obj::{maps::LegacyMap, BpfSectionKind}, sys::{override_syscall, Syscall}, }; @@ -861,9 +848,9 @@ mod tests { ..Default::default() }, section_index: 0, - symbol_index: 0, + section_kind: BpfSectionKind::Maps, + symbol_index: Some(0), data: Vec::new(), - kind: MapKind::Other, }) } diff --git a/aya/src/maps/perf/async_perf_event_array.rs b/aya/src/maps/perf/async_perf_event_array.rs index 39ffcfa1..7e55889a 100644 --- a/aya/src/maps/perf/async_perf_event_array.rs +++ b/aya/src/maps/perf/async_perf_event_array.rs @@ -1,6 +1,6 @@ use bytes::BytesMut; use std::{ - convert::AsMut, + borrow::{Borrow, BorrowMut}, os::unix::prelude::{AsRawFd, RawFd}, }; @@ -89,7 +89,7 @@ pub struct AsyncPerfEventArray { perf_map: PerfEventArray, } -impl + AsRef> AsyncPerfEventArray { +impl + Borrow> AsyncPerfEventArray { /// Opens the perf buffer at the given index. /// /// The returned buffer will receive all the events eBPF programs send at the given index. @@ -112,7 +112,7 @@ impl + AsRef> AsyncPerfEventArray { } } -impl> AsyncPerfEventArray { +impl> AsyncPerfEventArray { pub(crate) fn new(map: T) -> Result, MapError> { Ok(AsyncPerfEventArray { perf_map: PerfEventArray::new(map)?, @@ -138,7 +138,7 @@ pub struct AsyncPerfEventArrayBuffer { } #[cfg(any(feature = "async_tokio"))] -impl + AsRef> AsyncPerfEventArrayBuffer { +impl + Borrow> AsyncPerfEventArrayBuffer { /// Reads events from the buffer. 
/// /// This method reads events into the provided slice of buffers, filling @@ -168,7 +168,7 @@ impl + AsRef> AsyncPerfEventArrayBuffer { } #[cfg(all(not(feature = "async_tokio"), feature = "async_std"))] -impl + AsRef> AsyncPerfEventArrayBuffer { +impl + Borrow> AsyncPerfEventArrayBuffer { /// Reads events from the buffer. /// /// This method reads events into the provided slice of buffers, filling diff --git a/aya/src/maps/perf/perf_event_array.rs b/aya/src/maps/perf/perf_event_array.rs index 8dafd7f1..c241a37b 100644 --- a/aya/src/maps/perf/perf_event_array.rs +++ b/aya/src/maps/perf/perf_event_array.rs @@ -2,7 +2,7 @@ //! //! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page. use std::{ - convert::AsMut, + borrow::{Borrow, BorrowMut}, ops::Deref, os::unix::io::{AsRawFd, RawFd}, sync::Arc, @@ -31,7 +31,7 @@ pub struct PerfEventArrayBuffer { buf: PerfBuffer, } -impl + AsRef> PerfEventArrayBuffer { +impl + Borrow> PerfEventArrayBuffer { /// Returns true if the buffer contains events that haven't been read. pub fn readable(&self) -> bool { self.buf.readable() @@ -55,7 +55,7 @@ impl + AsRef> PerfEventArrayBuffer { } } -impl + AsRef> AsRawFd for PerfEventArrayBuffer { +impl + Borrow> AsRawFd for PerfEventArrayBuffer { fn as_raw_fd(&self) -> RawFd { self.buf.as_raw_fd() } @@ -84,14 +84,14 @@ impl + AsRef> AsRawFd for PerfEventArrayBuffer { /// ```no_run /// # use aya::maps::perf::PerfEventArrayBuffer; /// # use aya::maps::MapData; -/// # use std::convert::AsMut; +/// # use std::borrow::BorrowMut; /// # struct Poll { _t: std::marker::PhantomData }; -/// # impl> Poll { +/// # impl> Poll { /// # fn poll_readable(&self) -> &mut [PerfEventArrayBuffer] { /// # &mut [] /// # } /// # } -/// # fn poll_buffers>(bufs: Vec>) -> Poll { +/// # fn poll_buffers>(bufs: Vec>) -> Poll { /// # Poll { _t: std::marker::PhantomData } /// # } /// # #[derive(thiserror::Error, Debug)] @@ -160,9 +160,9 @@ pub struct PerfEventArray { page_size: usize, } -impl> PerfEventArray { +impl> PerfEventArray { pub(crate) fn new(map: T) -> Result, MapError> { - let _fd = map.as_ref().fd_or_err()?; + let _fd = map.borrow().fd_or_err()?; Ok(PerfEventArray { map: Arc::new(map), @@ -171,7 +171,7 @@ impl> PerfEventArray { } } -impl + AsRef> PerfEventArray { +impl + Borrow> PerfEventArray { /// Opens the perf buffer at the given index. /// /// The returned buffer will receive all the events eBPF programs send at the given index. @@ -183,7 +183,7 @@ impl + AsRef> PerfEventArray { // FIXME: keep track of open buffers // this cannot fail as new() checks that the fd is open - let map_data: &MapData = self.map.deref().as_ref(); + let map_data: &MapData = self.map.deref().borrow(); let map_fd = map_data.fd_or_err().unwrap(); let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?; bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0) diff --git a/aya/src/maps/queue.rs b/aya/src/maps/queue.rs index d10ecd04..c6810546 100644 --- a/aya/src/maps/queue.rs +++ b/aya/src/maps/queue.rs @@ -1,7 +1,6 @@ //! A FIFO queue. 
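The same bound change runs through this queue module and every other map wrapper touched in this diff: `AsRef<MapData>`/`AsMut<MapData>` becomes `Borrow<MapData>`/`BorrowMut<MapData>`. As a minimal sketch of why that works without the hand-written `AsRef`/`AsMut` impls removed from maps/mod.rs above (the types below are illustrative stand-ins, not aya's real definitions):

```rust
use std::borrow::Borrow;

// Stand-in types for illustration only; the real MapData and map wrappers live in aya::maps.
struct MapData;

struct Queue<T: Borrow<MapData>> {
    inner: T,
}

impl<T: Borrow<MapData>> Queue<T> {
    fn data(&self) -> &MapData {
        self.inner.borrow()
    }
}

fn main() {
    // An owned MapData satisfies the bound via std's blanket `impl<T> Borrow<T> for T`...
    let owned = Queue { inner: MapData };
    // ...and so does a shared reference, via `impl<'a, T> Borrow<T> for &'a T`,
    // which is why MapData no longer needs hand-written AsRef/AsMut impls.
    let backing = MapData;
    let by_ref = Queue { inner: &backing };
    let (_a, _b) = (owned.data(), by_ref.data());
}
```

std's blanket impls cover both the owned and the borrowed case, so a wrapper can hold either `MapData` or `&MapData` (and, through `BorrowMut`, `&mut MapData`) without any extra conversion impls on `MapData` itself.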
use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -34,9 +33,9 @@ pub struct Queue { _v: PhantomData, } -impl, V: Pod> Queue { +impl, V: Pod> Queue { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::<(), V>(data)?; let _fd = data.fd_or_err()?; @@ -51,11 +50,11 @@ impl, V: Pod> Queue { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn capacity(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } } -impl, V: Pod> Queue { +impl, V: Pod> Queue { /// Removes the first element and returns it. /// /// # Errors @@ -63,7 +62,7 @@ impl, V: Pod> Queue { /// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`] /// if `bpf_map_lookup_and_delete_elem` fails. pub fn pop(&mut self, flags: u64) -> Result { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_and_delete_elem::(fd, None, flags).map_err( |(_, io_error)| MapError::SyscallError { @@ -80,7 +79,7 @@ impl, V: Pod> Queue { /// /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. pub fn push(&mut self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_push_elem".to_owned(), diff --git a/aya/src/maps/sock/sock_hash.rs b/aya/src/maps/sock/sock_hash.rs index 7da2d097..244dcd1c 100644 --- a/aya/src/maps/sock/sock_hash.rs +++ b/aya/src/maps/sock/sock_hash.rs @@ -1,6 +1,5 @@ use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, os::unix::io::{AsRawFd, RawFd}, }; @@ -69,9 +68,9 @@ pub struct SockHash { _k: PhantomData, } -impl, K: Pod> SockHash { +impl, K: Pod> SockHash { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _ = data.fd_or_err()?; @@ -83,7 +82,7 @@ impl, K: Pod> SockHash { /// Returns the fd of the socket stored at the given key. pub fn get(&self, key: &K, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_lookup_elem".to_owned(), @@ -102,7 +101,7 @@ impl, K: Pod> SockHash { /// An iterator visiting all keys in arbitrary order. The iterator element /// type is `Result`. pub fn keys(&self) -> MapKeys<'_, K> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// Returns the map's file descriptor. @@ -110,11 +109,11 @@ impl, K: Pod> SockHash { /// The returned file descriptor can be used to attach programs that work with /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). pub fn fd(&self) -> Result { - Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) + Ok(SockMapFd(self.inner.borrow().fd_or_err()?)) } } -impl, K: Pod> SockHash { +impl, K: Pod> SockHash { /// Inserts a socket under the given key. 
pub fn insert( &mut self, @@ -122,18 +121,23 @@ impl, K: Pod> SockHash { value: I, flags: u64, ) -> Result<(), MapError> { - hash_map::insert(self.inner.as_mut(), key.borrow(), &value.as_raw_fd(), flags) + hash_map::insert( + self.inner.borrow_mut(), + key.borrow(), + &value.as_raw_fd(), + flags, + ) } /// Removes a socket from the map. pub fn remove(&mut self, key: &K) -> Result<(), MapError> { - hash_map::remove(self.inner.as_mut(), key) + hash_map::remove(self.inner.borrow_mut(), key) } } -impl, K: Pod> IterableMap for SockHash { +impl, K: Pod> IterableMap for SockHash { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, key: &K) -> Result { diff --git a/aya/src/maps/sock/sock_map.rs b/aya/src/maps/sock/sock_map.rs index 28c5b404..32625b41 100644 --- a/aya/src/maps/sock/sock_map.rs +++ b/aya/src/maps/sock/sock_map.rs @@ -1,7 +1,7 @@ //! An array of eBPF program file descriptors used as a jump table. use std::{ - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, os::unix::{io::AsRawFd, prelude::RawFd}, }; @@ -44,9 +44,9 @@ pub struct SockMap { pub(crate) inner: T, } -impl> SockMap { +impl> SockMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::(data)?; let _fd = data.fd_or_err()?; @@ -57,7 +57,7 @@ impl> SockMap { /// An iterator over the indices of the array that point to a program. The iterator item type /// is `Result`. pub fn indices(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } /// Returns the map's file descriptor. @@ -65,14 +65,14 @@ impl> SockMap { /// The returned file descriptor can be used to attach programs that work with /// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb). pub fn fd(&self) -> Result { - Ok(SockMapFd(self.inner.as_ref().fd_or_err()?)) + Ok(SockMapFd(self.inner.borrow().fd_or_err()?)) } } -impl> SockMap { +impl> SockMap { /// Stores a socket into the map. pub fn set(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); let fd = data.fd_or_err()?; check_bounds(data, index)?; bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err( @@ -86,7 +86,7 @@ impl> SockMap { /// Removes the socket stored at `index` from the map. pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> { - let data = self.inner.as_mut(); + let data = self.inner.borrow_mut(); let fd = data.fd_or_err()?; check_bounds(data, *index)?; bpf_map_delete_elem(fd, index) diff --git a/aya/src/maps/stack.rs b/aya/src/maps/stack.rs index c0742cfa..db428ddf 100644 --- a/aya/src/maps/stack.rs +++ b/aya/src/maps/stack.rs @@ -1,7 +1,6 @@ //! A LIFO stack. use std::{ - borrow::Borrow, - convert::{AsMut, AsRef}, + borrow::{Borrow, BorrowMut}, marker::PhantomData, }; @@ -34,9 +33,9 @@ pub struct Stack { _v: PhantomData, } -impl, V: Pod> Stack { +impl, V: Pod> Stack { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); check_kv_size::<(), V>(data)?; let _fd = data.fd_or_err()?; @@ -51,11 +50,11 @@ impl, V: Pod> Stack { /// /// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side. pub fn capacity(&self) -> u32 { - self.inner.as_ref().obj.max_entries() + self.inner.borrow().obj.max_entries() } } -impl, V: Pod> Stack { +impl, V: Pod> Stack { /// Removes the last element and returns it. 
/// /// # Errors @@ -63,7 +62,7 @@ impl, V: Pod> Stack { /// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`] /// if `bpf_map_lookup_and_delete_elem` fails. pub fn pop(&mut self, flags: u64) -> Result { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let value = bpf_map_lookup_and_delete_elem::(fd, None, flags).map_err( |(_, io_error)| MapError::SyscallError { @@ -80,7 +79,7 @@ impl, V: Pod> Stack { /// /// [`MapError::SyscallError`] if `bpf_map_update_elem` fails. pub fn push(&mut self, value: impl Borrow, flags: u64) -> Result<(), MapError> { - let fd = self.inner.as_mut().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| { MapError::SyscallError { call: "bpf_map_update_elem".to_owned(), diff --git a/aya/src/maps/stack_trace.rs b/aya/src/maps/stack_trace.rs index 135387e4..a8cce414 100644 --- a/aya/src/maps/stack_trace.rs +++ b/aya/src/maps/stack_trace.rs @@ -1,7 +1,7 @@ //! A hash map of kernel or user space stack traces. //! //! See [`StackTraceMap`] for documentation and examples. -use std::{collections::BTreeMap, convert::AsRef, fs, io, mem, path::Path, str::FromStr}; +use std::{borrow::Borrow, collections::BTreeMap, fs, io, mem, path::Path, str::FromStr}; use crate::{ maps::{IterableMap, MapData, MapError, MapIter, MapKeys}, @@ -67,9 +67,9 @@ pub struct StackTraceMap { max_stack_depth: usize, } -impl> StackTraceMap { +impl> StackTraceMap { pub(crate) fn new(map: T) -> Result, MapError> { - let data = map.as_ref(); + let data = map.borrow(); let expected = mem::size_of::(); let size = data.obj.key_size() as usize; if size != expected { @@ -102,7 +102,7 @@ impl> StackTraceMap { /// Returns [`MapError::KeyNotFound`] if there is no stack trace with the /// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails. pub fn get(&self, stack_id: &u32, flags: u64) -> Result { - let fd = self.inner.as_ref().fd_or_err()?; + let fd = self.inner.borrow().fd_or_err()?; let mut frames = vec![0; self.max_stack_depth]; bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags) @@ -136,13 +136,13 @@ impl> StackTraceMap { /// An iterator visiting all the stack_ids in arbitrary order. The iterator element /// type is `Result`. 
pub fn stack_ids(&self) -> MapKeys<'_, u32> { - MapKeys::new(self.inner.as_ref()) + MapKeys::new(self.inner.borrow()) } } -impl> IterableMap for StackTraceMap { +impl> IterableMap for StackTraceMap { fn map(&self) -> &MapData { - self.inner.as_ref() + self.inner.borrow() } fn get(&self, index: &u32) -> Result { @@ -150,7 +150,7 @@ impl> IterableMap for StackTraceMap { } } -impl<'a, T: AsRef> IntoIterator for &'a StackTraceMap { +impl<'a, T: Borrow> IntoIterator for &'a StackTraceMap { type Item = Result<(u32, StackTrace), MapError>; type IntoIter = MapIter<'a, u32, StackTrace, StackTraceMap>; diff --git a/aya/src/sys/bpf.rs b/aya/src/sys/bpf.rs index 101823d7..39b1eb48 100644 --- a/aya/src/sys/bpf.rs +++ b/aya/src/sys/bpf.rs @@ -66,7 +66,7 @@ pub(crate) fn bpf_create_map(name: &CStr, def: &obj::Map, btf_fd: Option) _ => { u.btf_key_type_id = m.def.btf_key_type_id; u.btf_value_type_id = m.def.btf_value_type_id; - u.btf_fd = btf_fd.unwrap() as u32; + u.btf_fd = btf_fd.unwrap_or_default() as u32; } } } @@ -599,6 +599,37 @@ pub(crate) fn is_prog_name_supported() -> bool { } } +pub(crate) fn is_probe_read_kernel_supported() -> bool { + let mut attr = unsafe { mem::zeroed::() }; + let u = unsafe { &mut attr.__bindgen_anon_3 }; + + let prog: &[u8] = &[ + 0xbf, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = r10 + 0x07, 0x01, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, // r1 -= 8 + 0xb7, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // r2 = 8 + 0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0 + 0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, // call 113 + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit + ]; + + let gpl = b"GPL\0"; + u.license = gpl.as_ptr() as u64; + + let insns = copy_instructions(prog).unwrap(); + u.insn_cnt = insns.len() as u32; + u.insns = insns.as_ptr() as u64; + u.prog_type = bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT as u32; + + match sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) { + Ok(v) => { + let fd = v as RawFd; + unsafe { close(fd) }; + true + } + Err(_) => false, + } +} + pub(crate) fn is_perf_link_supported() -> bool { let mut attr = unsafe { mem::zeroed::() }; let u = unsafe { &mut attr.__bindgen_anon_3 }; diff --git a/aya/src/sys/mod.rs b/aya/src/sys/mod.rs index 129889c5..294ff386 100644 --- a/aya/src/sys/mod.rs +++ b/aya/src/sys/mod.rs @@ -8,6 +8,8 @@ mod fake; use std::io; #[cfg(not(test))] use std::{ffi::CString, mem}; +#[cfg(not(test))] +use std::{fs::File, io::Read}; #[cfg(not(test))] use libc::utsname; @@ -82,8 +84,40 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { Ok((0xff, 0xff, 0xff)) } +#[cfg(not(test))] +fn ubuntu_kernel_version() -> Result<(u32, u32, u32), ()> { + if let Ok(mut file) = File::open("/proc/version_signature") { + let mut buf = String::new(); + let mut major = 0u32; + let mut minor = 0u32; + let mut patch = 0u32; + let format = CString::new("%*s %*s %u.%u.%u\n").unwrap(); + + file.read_to_string(&mut buf).map_err(|_| ())?; + + unsafe { + if libc::sscanf( + buf.as_ptr() as *const _, + format.as_ptr(), + &mut major as *mut u32, + &mut minor as *mut _, + &mut patch as *mut _, + ) == 3 + { + return Ok((major, minor, patch)); + } + } + } + + Err(()) +} + #[cfg(not(test))] pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { + if let Ok(version) = ubuntu_kernel_version() { + return Ok(version); + } + unsafe { let mut v = mem::zeroed::(); if libc::uname(&mut v as *mut _) != 0 { @@ -93,6 +127,33 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> { let mut major = 0u32; let mut minor = 0u32; let mut patch = 
0u32; + + let debian_marker = CString::new("Debian").unwrap(); + + let p = libc::strstr(v.version.as_ptr(), debian_marker.as_ptr()); + + if !p.is_null() { + let debian_format = CString::new("Debian %u.%u.%u").map_err(|_| ())?; + + if libc::sscanf( + p, + debian_format.as_ptr(), + &mut major as *mut u32, + &mut minor as *mut _, + &mut patch as *mut _, + ) == 3 + { + // On Debian 10, kernels after 4.19.229 expect 4.19.255 due to broken Makefile patches. + let patch_level_limit = if major == 4 && minor == 19 { 230 } else { 255 }; + + if patch >= patch_level_limit { + patch = 255; + } + + return Ok((major, minor, patch)); + } + } + let format = CString::new("%u.%u.%u").unwrap(); if libc::sscanf( v.release.as_ptr(), diff --git a/bpf/aya-bpf/src/programs/sk_buff.rs b/bpf/aya-bpf/src/programs/sk_buff.rs index 669c2c95..ab7f5038 100644 --- a/bpf/aya-bpf/src/programs/sk_buff.rs +++ b/bpf/aya-bpf/src/programs/sk_buff.rs @@ -6,8 +6,8 @@ use core::{ use aya_bpf_bindings::helpers::{ bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace, - bpf_skb_adjust_room, bpf_skb_change_type, bpf_skb_load_bytes, bpf_skb_pull_data, - bpf_skb_store_bytes, + bpf_skb_adjust_room, bpf_skb_change_proto, bpf_skb_change_type, bpf_skb_load_bytes, + bpf_skb_pull_data, bpf_skb_store_bytes, }; use aya_bpf_cty::c_long; @@ -189,6 +189,16 @@ impl SkBuff { } } + #[inline] + pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> { + let ret = unsafe { bpf_skb_change_proto(self.as_ptr() as *mut _, proto, flags) }; + if ret == 0 { + Ok(()) + } else { + Err(ret) + } + } + #[inline] pub fn change_type(&self, ty: u32) -> Result<(), c_long> { let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) }; diff --git a/bpf/aya-bpf/src/programs/tc.rs b/bpf/aya-bpf/src/programs/tc.rs index 1d3133d5..5aa0a173 100644 --- a/bpf/aya-bpf/src/programs/tc.rs +++ b/bpf/aya-bpf/src/programs/tc.rs @@ -142,6 +142,11 @@ impl TcContext { self.skb.clone_redirect(if_index, flags) } + #[inline] + pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> { + self.skb.change_proto(proto, flags) + } + #[inline] pub fn change_type(&self, ty: u32) -> Result<(), c_long> { self.skb.change_type(ty) diff --git a/test/integration-ebpf/Cargo.toml b/test/integration-ebpf/Cargo.toml index 8668b91d..69c60316 100644 --- a/test/integration-ebpf/Cargo.toml +++ b/test/integration-ebpf/Cargo.toml @@ -22,3 +22,7 @@ path = "src/pass.rs" [[bin]] name = "test" path = "src/test.rs" + +[[bin]] +name = "relocations" +path = "src/relocations.rs" diff --git a/test/integration-ebpf/src/bpf/text_64_64_reloc.c b/test/integration-ebpf/src/bpf/text_64_64_reloc.c new file mode 100644 index 00000000..877c7628 --- /dev/null +++ b/test/integration-ebpf/src/bpf/text_64_64_reloc.c @@ -0,0 +1,28 @@ +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u64); + __uint(max_entries, 2); +} RESULTS SEC(".maps"); + +static __u64 +inc_cb(void *map, __u32 *key, void *val, + void *data) +{ + __u64 *value = val; + *value += 1; + return 0; +} + +SEC("uprobe/test_text_64_64_reloc") +int test_text_64_64_reloc(struct pt_regs *ctx) +{ + bpf_for_each_map_elem(&RESULTS, inc_cb, NULL, 0); + return 0; +} + diff --git a/test/integration-ebpf/src/relocations.rs b/test/integration-ebpf/src/relocations.rs new file mode 100644 index 00000000..ee8a21f6 --- /dev/null +++ b/test/integration-ebpf/src/relocations.rs @@ -0,0 +1,45 @@ +#![no_std] +#![no_main] + 
+use core::hint; + +use aya_bpf::{ + macros::{map, uprobe}, + maps::Array, + programs::ProbeContext, +}; + +#[map] +static mut RESULTS: Array = Array::with_max_entries(3, 0); + +#[uprobe] +pub fn test_64_32_call_relocs(_ctx: ProbeContext) { + // this will link set_result and do a forward call + set_result(0, hint::black_box(1)); + + // set_result is already linked, this will just do the forward call + set_result(1, hint::black_box(2)); + + // this will link set_result_backward after set_result. Then will do a + // backward call to set_result. + set_result_backward(2, hint::black_box(3)); +} + +#[inline(never)] +fn set_result(index: u32, value: u64) { + unsafe { + if let Some(v) = RESULTS.get_ptr_mut(index) { + *v = value; + } + } +} + +#[inline(never)] +fn set_result_backward(index: u32, value: u64) { + set_result(index, value); +} + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! { + unsafe { core::hint::unreachable_unchecked() } +} diff --git a/test/integration-test-macros/src/lib.rs b/test/integration-test-macros/src/lib.rs index 6f69b486..297159ed 100644 --- a/test/integration-test-macros/src/lib.rs +++ b/test/integration-test-macros/src/lib.rs @@ -10,7 +10,7 @@ pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream { let expanded = quote! { #item - inventory::submit!(IntegrationTest { + inventory::submit!(crate::IntegrationTest { name: concat!(module_path!(), "::", #name_str), test_fn: #name, }); diff --git a/test/integration-test/src/tests/btf_relocations.rs b/test/integration-test/src/tests/btf_relocations.rs new file mode 100644 index 00000000..85291ef4 --- /dev/null +++ b/test/integration-test/src/tests/btf_relocations.rs @@ -0,0 +1,313 @@ +use anyhow::{Context, Result}; +use std::{path::PathBuf, process::Command, thread::sleep, time::Duration}; +use tempfile::TempDir; + +use aya::{maps::Array, programs::TracePoint, BpfLoader, Btf, Endianness}; + +use super::integration_test; + +// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no +// special meaning, they just have "nice" bit patterns that can be helpful while debugging. 
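Every test in this new file is registered through the `#[integration_test]` attribute, whose expansion (patched above in integration-test-macros to name `crate::IntegrationTest` explicitly) submits the test to `inventory`. A minimal sketch of the collecting side, assuming an `IntegrationTest` struct with exactly the two fields the macro submits; the runner loop is illustrative rather than the crate's actual entry point:

```rust
// Assumed shape, matching the fields used in the macro expansion above.
pub struct IntegrationTest {
    pub name: &'static str,
    pub test_fn: fn(),
}

inventory::collect!(IntegrationTest);

fn main() {
    // `inventory::iter` yields every value registered via `inventory::submit!`,
    // i.e. every function annotated with `#[integration_test]`.
    for test in inventory::iter::<IntegrationTest> {
        println!("running {}", test.name);
        (test.test_fn)();
    }
}
```

Fully qualifying `crate::IntegrationTest` in the expansion is what lets the test modules below drop `IntegrationTest` from their `use super::{...}` imports.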
+ +#[integration_test] +fn relocate_field() { + let test = RelocationTest { + local_definition: r#" + struct foo { + __u8 a; + __u8 b; + __u8 c; + __u8 d; + }; + "#, + target_btf: r#" + struct foo { + __u8 a; + __u8 c; + __u8 b; + __u8 d; + } s1; + "#, + relocation_code: r#" + __u8 memory[] = {1, 2, 3, 4}; + struct foo *ptr = (struct foo *) &memory; + value = __builtin_preserve_access_index(ptr->c); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap(), 2); + assert_eq!(test.run_no_btf().unwrap(), 3); +} + +#[integration_test] +fn relocate_enum() { + let test = RelocationTest { + local_definition: r#" + enum foo { D = 0xAAAAAAAA }; + "#, + target_btf: r#" + enum foo { D = 0xBBBBBBBB } e1; + "#, + relocation_code: r#" + #define BPF_ENUMVAL_VALUE 1 + value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap(), 0xBBBBBBBB); + assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA); +} + +#[integration_test] +fn relocate_enum_signed() { + let test = RelocationTest { + local_definition: r#" + enum foo { D = -0x7AAAAAAA }; + "#, + target_btf: r#" + enum foo { D = -0x7BBBBBBB } e1; + "#, + relocation_code: r#" + #define BPF_ENUMVAL_VALUE 1 + value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64); + assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64); +} + +#[integration_test] +fn relocate_enum64() { + let test = RelocationTest { + local_definition: r#" + enum foo { D = 0xAAAAAAAABBBBBBBB }; + "#, + target_btf: r#" + enum foo { D = 0xCCCCCCCCDDDDDDDD } e1; + "#, + relocation_code: r#" + #define BPF_ENUMVAL_VALUE 1 + value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD); + assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB); +} + +#[integration_test] +fn relocate_enum64_signed() { + let test = RelocationTest { + local_definition: r#" + enum foo { D = -0xAAAAAAABBBBBBBB }; + "#, + target_btf: r#" + enum foo { D = -0xCCCCCCCDDDDDDDD } e1; + "#, + relocation_code: r#" + #define BPF_ENUMVAL_VALUE 1 + value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64); + assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64); +} + +#[integration_test] +fn relocate_pointer() { + let test = RelocationTest { + local_definition: r#" + struct foo {}; + struct bar { struct foo *f; }; + "#, + target_btf: r#" + struct foo {}; + struct bar { struct foo *f; }; + "#, + relocation_code: r#" + __u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0}; + struct bar* ptr = (struct bar *) &memory; + value = (__u64) __builtin_preserve_access_index(ptr->f); + "#, + } + .build() + .unwrap(); + assert_eq!(test.run().unwrap(), 42); + assert_eq!(test.run_no_btf().unwrap(), 42); +} + +/// Utility code for running relocation tests: +/// - Generates the eBPF program using probided local definition and relocation code +/// - Generates the BTF from the target btf code +struct RelocationTest { + /// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode + local_definition: &'static str, + /// Target data structure definition. What the vmlinux would actually contain. 
+ target_btf: &'static str, + /// Code executed by the eBPF program to test the relocation. + /// The format should be: + // __u8 memory[] = { ... }; + // __u32 value = BPF_CORE_READ((struct foo *)&memory, ...); + // + // The generated code will be executed by attaching a tracepoint to sched_switch + // and emitting `__u32 value` an a map. See the code template below for more details. + relocation_code: &'static str, +} + +impl RelocationTest { + /// Build a RelocationTestRunner + fn build(&self) -> Result { + Ok(RelocationTestRunner { + ebpf: self.build_ebpf()?, + btf: self.build_btf()?, + }) + } + + /// - Generate the source eBPF filling a template + /// - Compile it with clang + fn build_ebpf(&self) -> Result> { + let local_definition = self.local_definition; + let relocation_code = self.relocation_code; + let (_tmp_dir, compiled_file) = compile(&format!( + r#" + #include + + static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; + + {local_definition} + + struct {{ + int (*type)[BPF_MAP_TYPE_ARRAY]; + __u32 *key; + __u64 *value; + int (*max_entries)[1]; + }} output_map + __attribute__((section(".maps"), used)); + + __attribute__((section("tracepoint/bpf_prog"), used)) + int bpf_prog(void *ctx) {{ + __u32 key = 0; + __u64 value = 0; + {relocation_code} + bpf_map_update_elem(&output_map, &key, &value, BPF_ANY); + return 0; + }} + + char _license[] __attribute__((section("license"), used)) = "GPL"; + "# + )) + .context("Failed to compile eBPF program")?; + let bytecode = + std::fs::read(compiled_file).context("Error reading compiled eBPF program")?; + Ok(bytecode) + } + + /// - Generate the target BTF source with a mock main() + /// - Compile it with clang + /// - Extract the BTF with llvm-objcopy + fn build_btf(&self) -> Result { + let target_btf = self.target_btf; + let relocation_code = self.relocation_code; + // BTF files can be generated and inspected with these commands: + // $ clang -c -g -O2 -target bpf target.c + // $ pahole --btf_encode_detached=target.btf -V target.o + // $ bpftool btf dump file ./target.btf format c + let (tmp_dir, compiled_file) = compile(&format!( + r#" + #include + + {target_btf} + int main() {{ + __u64 value = 0; + // This is needed to make sure to emit BTF for the defined types, + // it could be dead code eliminated if we don't. + {relocation_code}; + return value; + }} + "# + )) + .context("Failed to compile BTF")?; + Command::new("llvm-objcopy") + .current_dir(tmp_dir.path()) + .args(["--dump-section", ".BTF=target.btf"]) + .arg(compiled_file) + .status() + .context("Failed to run llvm-objcopy")? + .success() + .then_some(()) + .context("Failed to extract BTF")?; + let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default()) + .context("Error parsing generated BTF")?; + Ok(btf) + } +} + +/// Compile an eBPF program and return the path of the compiled object. +/// Also returns a TempDir handler, dropping it will clear the created dicretory. +fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> { + let tmp_dir = tempfile::tempdir().context("Error making temp dir")?; + let source = tmp_dir.path().join("source.c"); + std::fs::write(&source, source_code).context("Writing bpf program failed")?; + Command::new("clang") + .current_dir(&tmp_dir) + .args(["-c", "-g", "-O2", "-target", "bpf"]) + .arg(&source) + .status() + .context("Failed to run clang")? 
+ .success() + .then_some(()) + .context("Failed to compile eBPF source")?; + Ok((tmp_dir, source.with_extension("o"))) +} + +struct RelocationTestRunner { + ebpf: Vec, + btf: Btf, +} + +impl RelocationTestRunner { + /// Run test and return the output value + fn run(&self) -> Result { + self.run_internal(true).context("Error running with BTF") + } + + /// Run without loading btf + fn run_no_btf(&self) -> Result { + self.run_internal(false) + .context("Error running without BTF") + } + + fn run_internal(&self, with_relocations: bool) -> Result { + let mut loader = BpfLoader::new(); + if with_relocations { + loader.btf(Some(&self.btf)); + } else { + loader.btf(None); + } + let mut bpf = loader.load(&self.ebpf).context("Loading eBPF failed")?; + let program: &mut TracePoint = bpf + .program_mut("bpf_prog") + .context("bpf_prog not found")? + .try_into() + .context("program not a tracepoint")?; + program.load().context("Loading tracepoint failed")?; + // Attach to sched_switch and wait some time to make sure it executed at least once + program + .attach("sched", "sched_switch") + .context("attach failed")?; + sleep(Duration::from_millis(1000)); + // To inspect the loaded eBPF bytecode, increse the timeout and run: + // $ sudo bpftool prog dump xlated name bpf_prog + + let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap(); + let key = 0; + output_map.get(&key, 0).context("Getting key 0 failed") + } +} diff --git a/test/integration-test/src/tests/elf.rs b/test/integration-test/src/tests/elf.rs index 9522672d..623c9362 100644 --- a/test/integration-test/src/tests/elf.rs +++ b/test/integration-test/src/tests/elf.rs @@ -1,11 +1,11 @@ -use super::{integration_test, IntegrationTest}; +use super::integration_test; use aya::include_bytes_aligned; use object::{Object, ObjectSymbol}; #[integration_test] fn test_maps() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/map_test"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/map_test"); let obj_file = object::File::parse(bytes).unwrap(); if obj_file.section_by_name("maps").is_none() { panic!("No 'maps' ELF section"); diff --git a/test/integration-test/src/tests/load.rs b/test/integration-test/src/tests/load.rs index 8bb9dd0e..9aa21bb5 100644 --- a/test/integration-test/src/tests/load.rs +++ b/test/integration-test/src/tests/load.rs @@ -13,14 +13,14 @@ use log::warn; use crate::tests::kernel_version; -use super::{integration_test, IntegrationTest}; +use super::integration_test; const MAX_RETRIES: u32 = 100; const RETRY_DURATION_MS: u64 = 10; #[integration_test] fn long_name() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/name_test"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/name_test"); let mut bpf = Bpf::load(bytes).unwrap(); let name_prog: &mut Xdp = bpf .program_mut("ihaveaverylongname") @@ -38,7 +38,7 @@ fn long_name() { #[integration_test] fn multiple_btf_maps() { let bytes = - include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap-btf.bpf.o"); + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o"); let mut bpf = Bpf::load(bytes).unwrap(); let map_1: Array<_, u64> = bpf.take_map("map_1").unwrap().try_into().unwrap(); @@ -85,7 +85,7 @@ macro_rules! 
assert_loaded { #[integration_test] fn unload_xdp() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test"); let mut bpf = Bpf::load(bytes).unwrap(); let prog: &mut Xdp = bpf .program_mut("test_unload_xdp") @@ -115,7 +115,7 @@ fn unload_xdp() { #[integration_test] fn unload_kprobe() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test"); let mut bpf = Bpf::load(bytes).unwrap(); let prog: &mut KProbe = bpf .program_mut("test_unload_kpr") @@ -150,7 +150,7 @@ fn pin_link() { return; } - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test"); let mut bpf = Bpf::load(bytes).unwrap(); let prog: &mut Xdp = bpf .program_mut("test_unload_xdp") @@ -185,7 +185,7 @@ fn pin_lifecycle() { return; } - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass"); // 1. Load Program and Pin { diff --git a/test/integration-test/src/tests/mod.rs b/test/integration-test/src/tests/mod.rs index ddb1b504..127b037d 100644 --- a/test/integration-test/src/tests/mod.rs +++ b/test/integration-test/src/tests/mod.rs @@ -4,6 +4,7 @@ use libc::{uname, utsname}; use regex::Regex; use std::{ffi::CStr, mem}; +pub mod btf_relocations; pub mod elf; pub mod load; pub mod rbpf; diff --git a/test/integration-test/src/tests/rbpf.rs b/test/integration-test/src/tests/rbpf.rs index 92dfac74..e896d262 100644 --- a/test/integration-test/src/tests/rbpf.rs +++ b/test/integration-test/src/tests/rbpf.rs @@ -4,11 +4,11 @@ use std::collections::HashMap; use aya::include_bytes_aligned; use aya_obj::{generated::bpf_insn, Object, ProgramSection}; -use super::{integration_test, IntegrationTest}; +use super::integration_test; #[integration_test] fn run_with_rbpf() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass"); let object = Object::parse(bytes).unwrap(); assert_eq!(object.programs.len(), 1); @@ -36,7 +36,7 @@ static mut MULTIMAP_MAPS: [*mut Vec; 2] = [null_mut(), null_mut()]; #[integration_test] fn use_map_with_rbpf() { let bytes = - include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap-btf.bpf.o"); + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o"); let mut object = Object::parse(bytes).unwrap(); assert_eq!(object.programs.len(), 1); @@ -69,14 +69,20 @@ fn use_map_with_rbpf() { } } + let text_sections = object + .functions + .iter() + .map(|((section_index, _), _)| *section_index) + .collect(); object .relocate_maps( maps.iter() .map(|(s, (fd, map))| (s.as_ref() as &str, Some(*fd), map)), + &text_sections, ) .expect("Relocation failed"); // Actually there is no local function call involved. 
- object.relocate_calls().unwrap(); + object.relocate_calls(&text_sections).unwrap(); // Executes the program assert_eq!(object.programs.len(), 1); diff --git a/test/integration-test/src/tests/relocations.rs b/test/integration-test/src/tests/relocations.rs index 53de8b5d..8a97a822 100644 --- a/test/integration-test/src/tests/relocations.rs +++ b/test/integration-test/src/tests/relocations.rs @@ -1,313 +1,70 @@ -use anyhow::{Context, Result}; -use std::{path::PathBuf, process::Command, thread::sleep, time::Duration}; -use tempfile::TempDir; +use std::{process::exit, time::Duration}; -use aya::{maps::Array, programs::TracePoint, BpfLoader, Btf, Endianness}; - -use super::{integration_test, IntegrationTest}; - -// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no -// special meaning, they just have "nice" bit patterns that can be helpful while debugging. +use aya::{ + include_bytes_aligned, + programs::{ProgramError, UProbe}, + Bpf, +}; +use integration_test_macros::integration_test; #[integration_test] -fn relocate_field() { - let test = RelocationTest { - local_definition: r#" - struct foo { - __u8 a; - __u8 b; - __u8 c; - __u8 d; - }; - "#, - target_btf: r#" - struct foo { - __u8 a; - __u8 c; - __u8 b; - __u8 d; - } s1; - "#, - relocation_code: r#" - __u8 memory[] = {1, 2, 3, 4}; - struct foo *ptr = (struct foo *) &memory; - value = __builtin_preserve_access_index(ptr->c); - "#, - } - .build() - .unwrap(); - assert_eq!(test.run().unwrap(), 2); - assert_eq!(test.run_no_btf().unwrap(), 3); +fn relocations() { + let bpf = load_and_attach( + "test_64_32_call_relocs", + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/relocations"), + ); + + trigger_relocations_program(); + std::thread::sleep(Duration::from_millis(100)); + + let m = aya::maps::Array::<_, u64>::try_from(bpf.map("RESULTS").unwrap()).unwrap(); + assert_eq!(m.get(&0, 0).unwrap(), 1); + assert_eq!(m.get(&1, 0).unwrap(), 2); + assert_eq!(m.get(&2, 0).unwrap(), 3); } #[integration_test] -fn relocate_enum() { - let test = RelocationTest { - local_definition: r#" - enum foo { D = 0xAAAAAAAA }; - "#, - target_btf: r#" - enum foo { D = 0xBBBBBBBB } e1; - "#, - relocation_code: r#" - #define BPF_ENUMVAL_VALUE 1 - value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); - "#, - } - .build() - .unwrap(); - assert_eq!(test.run().unwrap(), 0xBBBBBBBB); - assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA); -} +fn text_64_64_reloc() { + let mut bpf = load_and_attach( + "test_text_64_64_reloc", + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/text_64_64_reloc.o"), + ); -#[integration_test] -fn relocate_enum_signed() { - let test = RelocationTest { - local_definition: r#" - enum foo { D = -0x7AAAAAAA }; - "#, - target_btf: r#" - enum foo { D = -0x7BBBBBBB } e1; - "#, - relocation_code: r#" - #define BPF_ENUMVAL_VALUE 1 - value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); - "#, - } - .build() - .unwrap(); - assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64); - assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64); -} + let mut m = aya::maps::Array::<_, u64>::try_from(bpf.map_mut("RESULTS").unwrap()).unwrap(); + m.set(0, 1, 0).unwrap(); + m.set(1, 2, 0).unwrap(); -#[integration_test] -fn relocate_enum64() { - let test = RelocationTest { - local_definition: r#" - enum foo { D = 0xAAAAAAAABBBBBBBB }; - "#, - target_btf: r#" - enum foo { D = 0xCCCCCCCCDDDDDDDD } e1; - "#, - relocation_code: r#" - 
#define BPF_ENUMVAL_VALUE 1 - value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); - "#, - } - .build() - .unwrap(); - assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD); - assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB); -} + trigger_relocations_program(); + std::thread::sleep(Duration::from_millis(100)); -#[integration_test] -fn relocate_enum64_signed() { - let test = RelocationTest { - local_definition: r#" - enum foo { D = -0xAAAAAAABBBBBBBB }; - "#, - target_btf: r#" - enum foo { D = -0xCCCCCCCDDDDDDDD } e1; - "#, - relocation_code: r#" - #define BPF_ENUMVAL_VALUE 1 - value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE); - "#, - } - .build() - .unwrap(); - assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64); - assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64); + assert_eq!(m.get(&0, 0).unwrap(), 2); + assert_eq!(m.get(&1, 0).unwrap(), 3); } -#[integration_test] -fn relocate_pointer() { - let test = RelocationTest { - local_definition: r#" - struct foo {}; - struct bar { struct foo *f; }; - "#, - target_btf: r#" - struct foo {}; - struct bar { struct foo *f; }; - "#, - relocation_code: r#" - __u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0}; - struct bar* ptr = (struct bar *) &memory; - value = (__u64) __builtin_preserve_access_index(ptr->f); - "#, - } - .build() +fn load_and_attach(name: &str, bytes: &[u8]) -> Bpf { + let mut bpf = Bpf::load(bytes).unwrap(); + + let prog: &mut UProbe = bpf.program_mut(name).unwrap().try_into().unwrap(); + if let Err(ProgramError::LoadError { + io_error, + verifier_log, + }) = prog.load() + { + println!("Failed to load program `{name}`: {io_error}. Verifier log:\n{verifier_log:#}"); + exit(1); + }; + + prog.attach( + Some("trigger_relocations_program"), + 0, + "/proc/self/exe", + None, + ) .unwrap(); - assert_eq!(test.run().unwrap(), 42); - assert_eq!(test.run_no_btf().unwrap(), 42); -} -/// Utility code for running relocation tests: -/// - Generates the eBPF program using probided local definition and relocation code -/// - Generates the BTF from the target btf code -struct RelocationTest { - /// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode - local_definition: &'static str, - /// Target data structure definition. What the vmlinux would actually contain. - target_btf: &'static str, - /// Code executed by the eBPF program to test the relocation. - /// The format should be: - // __u8 memory[] = { ... }; - // __u32 value = BPF_CORE_READ((struct foo *)&memory, ...); - // - // The generated code will be executed by attaching a tracepoint to sched_switch - // and emitting `__u32 value` an a map. See the code template below for more details. 
- relocation_code: &'static str, + bpf } -impl RelocationTest { - /// Build a RelocationTestRunner - fn build(&self) -> Result { - Ok(RelocationTestRunner { - ebpf: self.build_ebpf()?, - btf: self.build_btf()?, - }) - } - - /// - Generate the source eBPF filling a template - /// - Compile it with clang - fn build_ebpf(&self) -> Result> { - let local_definition = self.local_definition; - let relocation_code = self.relocation_code; - let (_tmp_dir, compiled_file) = compile(&format!( - r#" - #include - - static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2; - - {local_definition} - - struct {{ - int (*type)[BPF_MAP_TYPE_ARRAY]; - __u32 *key; - __u64 *value; - int (*max_entries)[1]; - }} output_map - __attribute__((section(".maps"), used)); - - __attribute__((section("tracepoint/bpf_prog"), used)) - int bpf_prog(void *ctx) {{ - __u32 key = 0; - __u64 value = 0; - {relocation_code} - bpf_map_update_elem(&output_map, &key, &value, BPF_ANY); - return 0; - }} - - char _license[] __attribute__((section("license"), used)) = "GPL"; - "# - )) - .context("Failed to compile eBPF program")?; - let bytecode = - std::fs::read(compiled_file).context("Error reading compiled eBPF program")?; - Ok(bytecode) - } - - /// - Generate the target BTF source with a mock main() - /// - Compile it with clang - /// - Extract the BTF with llvm-objcopy - fn build_btf(&self) -> Result { - let target_btf = self.target_btf; - let relocation_code = self.relocation_code; - // BTF files can be generated and inspected with these commands: - // $ clang -c -g -O2 -target bpf target.c - // $ pahole --btf_encode_detached=target.btf -V target.o - // $ bpftool btf dump file ./target.btf format c - let (tmp_dir, compiled_file) = compile(&format!( - r#" - #include - - {target_btf} - int main() {{ - __u64 value = 0; - // This is needed to make sure to emit BTF for the defined types, - // it could be dead code eliminated if we don't. - {relocation_code}; - return value; - }} - "# - )) - .context("Failed to compile BTF")?; - Command::new("llvm-objcopy") - .current_dir(tmp_dir.path()) - .args(["--dump-section", ".BTF=target.btf"]) - .arg(compiled_file) - .status() - .context("Failed to run llvm-objcopy")? - .success() - .then_some(()) - .context("Failed to extract BTF")?; - let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default()) - .context("Error parsing generated BTF")?; - Ok(btf) - } -} - -/// Compile an eBPF program and return the path of the compiled object. -/// Also returns a TempDir handler, dropping it will clear the created dicretory. -fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> { - let tmp_dir = tempfile::tempdir().context("Error making temp dir")?; - let source = tmp_dir.path().join("source.c"); - std::fs::write(&source, source_code).context("Writing bpf program failed")?; - Command::new("clang") - .current_dir(&tmp_dir) - .args(["-c", "-g", "-O2", "-target", "bpf"]) - .arg(&source) - .status() - .context("Failed to run clang")? 
- .success() - .then_some(()) - .context("Failed to compile eBPF source")?; - Ok((tmp_dir, source.with_extension("o"))) -} - -struct RelocationTestRunner { - ebpf: Vec, - btf: Btf, -} - -impl RelocationTestRunner { - /// Run test and return the output value - fn run(&self) -> Result { - self.run_internal(true).context("Error running with BTF") - } - - /// Run without loading btf - fn run_no_btf(&self) -> Result { - self.run_internal(false) - .context("Error running without BTF") - } - - fn run_internal(&self, with_relocations: bool) -> Result { - let mut loader = BpfLoader::new(); - if with_relocations { - loader.btf(Some(&self.btf)); - } else { - loader.btf(None); - } - let mut bpf = loader.load(&self.ebpf).context("Loading eBPF failed")?; - let program: &mut TracePoint = bpf - .program_mut("bpf_prog") - .context("bpf_prog not found")? - .try_into() - .context("program not a tracepoint")?; - program.load().context("Loading tracepoint failed")?; - // Attach to sched_switch and wait some time to make sure it executed at least once - program - .attach("sched", "sched_switch") - .context("attach failed")?; - sleep(Duration::from_millis(1000)); - // To inspect the loaded eBPF bytecode, increse the timeout and run: - // $ sudo bpftool prog dump xlated name bpf_prog - - let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap(); - let key = 0; - output_map.get(&key, 0).context("Getting key 0 failed") - } -} +#[no_mangle] +#[inline(never)] +pub extern "C" fn trigger_relocations_program() {} diff --git a/test/integration-test/src/tests/smoke.rs b/test/integration-test/src/tests/smoke.rs index a7450064..17abd79b 100644 --- a/test/integration-test/src/tests/smoke.rs +++ b/test/integration-test/src/tests/smoke.rs @@ -5,11 +5,11 @@ use aya::{ }; use log::info; -use super::{integration_test, kernel_version, IntegrationTest}; +use super::{integration_test, kernel_version}; #[integration_test] fn xdp() { - let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass"); + let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass"); let mut bpf = Bpf::load(bytes).unwrap(); let dispatcher: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap(); dispatcher.load().unwrap(); @@ -28,13 +28,14 @@ fn extension() { } // TODO: Check kernel version == 5.9 or later let main_bytes = - include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/main.bpf.o"); + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/main.bpf.o"); let mut bpf = Bpf::load(main_bytes).unwrap(); let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap(); pass.load().unwrap(); pass.attach("lo", XdpFlags::default()).unwrap(); - let ext_bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/ext.bpf.o"); + let ext_bytes = + include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/ext.bpf.o"); let mut bpf = BpfLoader::new().extension("drop").load(ext_bytes).unwrap(); let drop_: &mut Extension = bpf.program_mut("drop").unwrap().try_into().unwrap(); drop_.load(pass.fd().unwrap(), "xdp_pass").unwrap(); diff --git a/test/run.sh b/test/run.sh index 94600529..a33449eb 100755 --- a/test/run.sh +++ b/test/run.sh @@ -40,7 +40,7 @@ fi # Test Image if [ -z "${AYA_TEST_IMAGE}" ]; then - AYA_TEST_IMAGE="fedora37" + AYA_TEST_IMAGE="fedora38" fi case "${AYA_TEST_IMAGE}" in @@ -59,6 +59,14 @@ download_images() { curl -o "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}" fi ;; 
+ fedora38) + if [ ! -f "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" ]; then + IMAGE="Fedora-Cloud-Base-38_Beta-1.3.${AYA_GUEST_ARCH}.qcow2" + IMAGE_URL="https://fr2.rpmfind.net/linux/fedora/linux/releases/test/38_Beta/Cloud/${AYA_GUEST_ARCH}/images" + echo "Downloading: ${IMAGE}, this may take a while..." + curl -o "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}" + fi + ;; centos8) if [ ! -f "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" ]; then IMAGE="CentOS-8-GenericCloud-8.4.2105-20210603.0.${AYA_GUEST_ARCH}.qcow2" @@ -181,6 +189,9 @@ EOF echo "VM launched" exec_vm uname -a + echo "Enabling testing repositories" + exec_vm sudo dnf config-manager --set-enabled updates-testing + exec_vm sudo dnf config-manager --set-enabled updates-testing-modular echo "Installing dependencies" exec_vm sudo dnf install -qy bpftool llvm llvm-devel clang clang-devel zlib-devel exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \ diff --git a/xtask/src/build_ebpf.rs b/xtask/src/build_ebpf.rs index 4f98feba..cf2b50f8 100644 --- a/xtask/src/build_ebpf.rs +++ b/xtask/src/build_ebpf.rs @@ -41,9 +41,6 @@ pub struct BuildEbpfOptions { /// Set the endianness of the BPF target #[clap(default_value = "bpfel-unknown-none", long)] pub target: Architecture, - /// Build the release target - #[clap(long)] - pub release: bool, /// Libbpf dir, required for compiling C code #[clap(long, action)] pub libbpf_dir: PathBuf, @@ -59,17 +56,15 @@ fn build_rust_ebpf(opts: &BuildEbpfOptions) -> anyhow::Result<()> { dir.push("test/integration-ebpf"); let target = format!("--target={}", opts.target); - let mut args = vec![ + let args = vec![ "+nightly", "build", + "--release", "--verbose", target.as_str(), "-Z", "build-std=core", ]; - if opts.release { - args.push("--release") - } let status = Command::new("cargo") .current_dir(&dir) .args(&args) @@ -99,7 +94,7 @@ fn build_c_ebpf(opts: &BuildEbpfOptions) -> anyhow::Result<()> { let mut out_path = PathBuf::from(WORKSPACE_ROOT.to_string()); out_path.push("target"); out_path.push(opts.target.to_string()); - out_path.push(if opts.release { "release " } else { "debug" }); + out_path.push("release"); let include_path = out_path.join("include"); get_libbpf_headers(&opts.libbpf_dir, &include_path)?; diff --git a/xtask/src/run.rs b/xtask/src/run.rs index 47ca18cf..c433c3f7 100644 --- a/xtask/src/run.rs +++ b/xtask/src/run.rs @@ -45,7 +45,6 @@ pub fn run(opts: Options) -> Result<(), anyhow::Error> { // build our ebpf program followed by our application build_ebpf(BuildOptions { target: opts.bpf_target, - release: opts.release, libbpf_dir: PathBuf::from(&opts.libbpf_dir), }) .context("Error while building eBPF program")?;
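One detail worth noting in the build_ebpf.rs hunk above: the removed branch pushed the literal `"release "` with a trailing space, so with `--release` the computed output path did not match cargo's actual `target/<triple>/release` directory. Hardcoding `"release"` fixes that as a side effect. A small illustration with a hypothetical path (Linux path separators assumed):

```rust
use std::path::PathBuf;

fn main() {
    // The branch removed from build_ebpf.rs pushed "release " (note the
    // trailing space), producing a directory name cargo never writes to.
    let mut out_path = PathBuf::from("target/bpfel-unknown-none");
    out_path.push("release ");
    assert_eq!(out_path.to_string_lossy(), "target/bpfel-unknown-none/release ");

    // Hardcoding "release", as the patch now does, yields the expected path.
    let mut fixed = PathBuf::from("target/bpfel-unknown-none");
    fixed.push("release");
    assert_eq!(fixed.to_string_lossy(), "target/bpfel-unknown-none/release");
}
```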