Merge branch 'aya-rs:main' into lsm_sleepable

pull/545/head
Everett Pompeii 2 years ago committed by GitHub
commit 1f2006bfde
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -14,9 +14,6 @@ panic = "abort"
[profile.release]
panic = "abort"
[profile.dev.package.integration-ebpf]
opt-level = 2
overflow-checks = false
[profile.release.package.integration-ebpf]
debug = 2
codegen-units = 1

@ -15,7 +15,7 @@ userspace = [ "aya" ]
[dependencies]
aya = { path = "../aya", version = "0.11.0", optional=true }
num_enum = { version = "0.5", default-features = false }
num_enum = { version = "0.6", default-features = false }
[lib]
path = "src/lib.rs"

@ -432,6 +432,19 @@ impl Btf {
// Sanitize DATASEC if they are not supported
BtfType::DataSec(d) if !features.btf_datasec => {
debug!("{}: not supported. replacing with STRUCT", kind);
// STRUCTs aren't allowed to have "." in their name; fix it up if needed.
let mut name_offset = t.name_offset();
let sec_name = self.string_at(name_offset)?;
let name = sec_name.to_string();
// Handle any "." characters in struct names
// Example: ".maps"
let fixed_name = name.replace('.', "_");
if fixed_name != name {
name_offset = self.add_string(fixed_name);
}
let mut members = vec![];
for member in d.entries.iter() {
let mt = types.type_by_id(member.btf_type).unwrap();
@ -441,7 +454,9 @@ impl Btf {
offset: member.offset * 8,
})
}
types.types[i] = BtfType::Struct(Struct::new(t.name_offset(), members, 0));
types.types[i] =
BtfType::Struct(Struct::new(name_offset, members, d.entries.len() as u32));
}
// Fixup DATASEC
// DATASEC sizes aren't always set by LLVM
@ -536,23 +551,40 @@ impl Btf {
types.types[i] = enum_type;
}
// Sanitize FUNC
BtfType::Func(ty) if !features.btf_func => {
BtfType::Func(ty) => {
let name = self.string_at(ty.name_offset)?;
// Sanitize FUNC
if !features.btf_func {
debug!("{}: not supported. replacing with TYPEDEF", kind);
let typedef_type = BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type));
let typedef_type =
BtfType::Typedef(Typedef::new(ty.name_offset, ty.btf_type));
types.types[i] = typedef_type;
}
// Sanitize BTF_FUNC_GLOBAL
BtfType::Func(ty) if !features.btf_func_global => {
} else if !features.btf_func_global
|| name == "memset"
|| name == "memcpy"
|| name == "memmove"
|| name == "memcmp"
{
// Sanitize BTF_FUNC_GLOBAL when not supported and ensure that
// memory builtins are marked as static. Globals are type checked
// and verified separately from their callers, while instead we
// want tracking info (eg bound checks) to be propagated to the
// memory builtins.
let mut fixed_ty = ty.clone();
if ty.linkage() == FuncLinkage::Global {
if !features.btf_func_global {
debug!(
"{}: BTF_FUNC_GLOBAL not supported. replacing with BTF_FUNC_STATIC",
kind
);
} else {
debug!("changing FUNC {name} linkage to BTF_FUNC_STATIC");
}
fixed_ty.set_linkage(FuncLinkage::Static);
}
types.types[i] = BtfType::Func(fixed_ty);
}
}
// Sanitize FLOAT
BtfType::Float(ty) if !features.btf_float => {
debug!("{}: not supported. replacing with STRUCT", kind);
@ -1045,9 +1077,7 @@ mod tests {
let name_offset = btf.add_string("&mut int".to_string());
let ptr_type_id = btf.add_type(BtfType::Ptr(Ptr::new(name_offset, int_type_id)));
let features = BtfFeatures {
..Default::default()
};
let features = Default::default();
btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features)
.unwrap();
@ -1118,7 +1148,7 @@ mod tests {
VarLinkage::Static,
)));
let name_offset = btf.add_string(".data".to_string());
let name_offset = btf.add_string("data".to_string());
let variables = vec![DataSecEntry {
btf_type: var_type_id,
offset: 0,
@ -1352,6 +1382,60 @@ mod tests {
Btf::parse(&raw, Endianness::default()).unwrap();
}
#[test]
// Checks that `fixup_and_sanitize` forces the memory builtins
// (memset/memcpy/memcmp/memmove) to BTF_FUNC_STATIC linkage even when the
// kernel supports BTF_FUNC_GLOBAL, so bounds-check info propagates into them.
fn test_sanitize_mem_builtins() {
    let mut btf = Btf::new();
    // A plain 4-byte signed int type used for the builtin's parameters and
    // return value.
    let name_offset = btf.add_string("int".to_string());
    let int_type_id = btf.add_type(BtfType::Int(Int::new(
        name_offset,
        4,
        IntEncoding::Signed,
        0,
    )));
    // Two int parameters; the exact prototype doesn't matter for the
    // linkage fixup, only the function name does.
    let params = vec![
        BtfParam {
            name_offset: btf.add_string("a".to_string()),
            btf_type: int_type_id,
        },
        BtfParam {
            name_offset: btf.add_string("b".to_string()),
            btf_type: int_type_id,
        },
    ];
    let func_proto_type_id =
        btf.add_type(BtfType::FuncProto(FuncProto::new(params, int_type_id)));
    let builtins = ["memset", "memcpy", "memcmp", "memmove"];
    for fname in builtins {
        // Each builtin starts out with Global linkage; the sanitizer must
        // downgrade it to Static.
        let func_name_offset = btf.add_string(fname.to_string());
        let func_type_id = btf.add_type(BtfType::Func(Func::new(
            func_name_offset,
            func_proto_type_id,
            FuncLinkage::Global,
        )));
        let features = BtfFeatures {
            btf_func: true,
            btf_func_global: true, // to force function name check
            ..Default::default()
        };
        btf.fixup_and_sanitize(&HashMap::new(), &HashMap::new(), &features)
            .unwrap();
        if let BtfType::Func(fixed) = btf.type_by_id(func_type_id).unwrap() {
            assert!(fixed.linkage() == FuncLinkage::Static);
        } else {
            panic!("not a func")
        }
        // Ensure we can convert to bytes and back again
        let raw = btf.to_bytes();
        Btf::parse(&raw, Endianness::default()).unwrap();
    }
}
#[test]
fn test_sanitize_float() {
let mut btf = Btf::new();

@ -37,8 +37,9 @@
//! let bytes = std::fs::read("program.o").unwrap();
//! let mut object = Object::parse(&bytes).unwrap();
//! // Relocate the programs
//! object.relocate_calls().unwrap();
//! object.relocate_maps(std::iter::empty()).unwrap();
//! let text_sections = std::collections::HashSet::new();
//! object.relocate_calls(&text_sections).unwrap();
//! object.relocate_maps(std::iter::empty(), &text_sections).unwrap();
//!
//! // Run with rbpf
//! let instructions = &object.programs["prog_name"].function.instructions;

@ -2,7 +2,10 @@
use core::mem;
use crate::thiserror::{self, Error};
use crate::{
thiserror::{self, Error},
BpfSectionKind,
};
use alloc::vec::Vec;
/// Invalid map type encountered
@ -139,33 +142,6 @@ pub struct bpf_map_def {
/// The first five __u32 of `bpf_map_def` must be defined.
pub(crate) const MINIMUM_MAP_SIZE: usize = mem::size_of::<u32>() * 5;
/// Kinds of maps
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MapKind {
/// A map holding `.bss` section data
Bss,
/// A map holding `.data` section data
Data,
/// A map holding `.rodata` section data
Rodata,
/// Other maps
Other,
}
impl From<&str> for MapKind {
fn from(s: &str) -> Self {
if s == ".bss" {
MapKind::Bss
} else if s.starts_with(".data") {
MapKind::Data
} else if s.starts_with(".rodata") {
MapKind::Rodata
} else {
MapKind::Other
}
}
}
/// Map data defined in `maps` or `.maps` sections
#[derive(Debug, Clone)]
pub enum Map {
@ -248,14 +224,6 @@ impl Map {
}
}
/// Returns the map kind
pub fn kind(&self) -> MapKind {
match self {
Map::Legacy(m) => m.kind,
Map::Btf(m) => m.kind,
}
}
/// Returns the section index
pub fn section_index(&self) -> usize {
match self {
@ -264,11 +232,22 @@ impl Map {
}
}
/// Returns the symbol index
pub fn symbol_index(&self) -> usize {
/// Returns the section kind.
pub fn section_kind(&self) -> BpfSectionKind {
match self {
Map::Legacy(m) => m.section_kind,
Map::Btf(_) => BpfSectionKind::BtfMaps,
}
}
/// Returns the symbol index.
///
/// This is `None` for data maps (.bss, .data and .rodata) since those don't
/// need symbols in order to be relocated.
pub fn symbol_index(&self) -> Option<usize> {
match self {
Map::Legacy(m) => m.symbol_index,
Map::Btf(m) => m.symbol_index,
Map::Btf(m) => Some(m.symbol_index),
}
}
}
@ -283,12 +262,16 @@ pub struct LegacyMap {
pub def: bpf_map_def,
/// The section index
pub section_index: usize,
/// The symbol index
pub symbol_index: usize,
/// The section kind
pub section_kind: BpfSectionKind,
/// The symbol index.
///
/// This is None for data maps (.bss .data and .rodata). We don't need
/// symbols to relocate those since they don't contain multiple maps, but
/// are just a flat array of bytes.
pub symbol_index: Option<usize>,
/// The map data
pub data: Vec<u8>,
/// The map kind
pub kind: MapKind,
}
/// A BTF-defined map, most likely from a `.maps` section.
@ -298,6 +281,5 @@ pub struct BtfMap {
pub def: BtfMapDef,
pub(crate) section_index: usize,
pub(crate) symbol_index: usize,
pub(crate) kind: MapKind,
pub(crate) data: Vec<u8>,
}

@ -15,7 +15,9 @@ use object::{
};
use crate::{
maps::{BtfMap, LegacyMap, Map, MapKind, MINIMUM_MAP_SIZE},
btf::BtfFeatures,
generated::{BPF_CALL, BPF_JMP, BPF_K},
maps::{BtfMap, LegacyMap, Map, MINIMUM_MAP_SIZE},
relocation::*,
thiserror::{self, Error},
util::HashMap,
@ -33,6 +35,16 @@ use crate::btf::{Array, DataSecEntry, FuncSecInfo, LineSecInfo};
const KERNEL_VERSION_ANY: u32 = 0xFFFF_FFFE;
/// Features implements BPF and BTF feature detection
#[derive(Default, Debug)]
#[allow(missing_docs)]
pub struct Features {
pub bpf_name: bool,
pub bpf_probe_read_kernel: bool,
pub bpf_perf_link: bool,
pub btf: Option<BtfFeatures>,
}
/// The loaded object file representation
#[derive(Clone)]
pub struct Object {
@ -52,14 +64,13 @@ pub struct Object {
/// in [ProgramSection]s as keys.
pub programs: HashMap<String, Program>,
/// Functions
pub functions: HashMap<u64, Function>,
pub functions: HashMap<(usize, u64), Function>,
pub(crate) relocations: HashMap<SectionIndex, HashMap<u64, Relocation>>,
pub(crate) symbols_by_index: HashMap<usize, Symbol>,
pub(crate) symbol_table: HashMap<usize, Symbol>,
pub(crate) section_sizes: HashMap<String, u64>,
// symbol_offset_by_name caches symbols that could be referenced from a
// BTF VAR type so the offsets can be fixed up
pub(crate) symbol_offset_by_name: HashMap<String, u64>,
pub(crate) text_section_index: Option<usize>,
}
/// An eBPF program
@ -524,7 +535,7 @@ impl Object {
is_definition: symbol.is_definition(),
kind: symbol.kind(),
};
bpf_obj.symbols_by_index.insert(symbol.index().0, sym);
bpf_obj.symbol_table.insert(symbol.index().0, sym);
if symbol.is_global() || symbol.kind() == SymbolKind::Data {
bpf_obj.symbol_offset_by_name.insert(name, symbol.address());
@ -566,17 +577,16 @@ impl Object {
programs: HashMap::new(),
functions: HashMap::new(),
relocations: HashMap::new(),
symbols_by_index: HashMap::new(),
symbol_table: HashMap::new(),
section_sizes: HashMap::new(),
symbol_offset_by_name: HashMap::new(),
text_section_index: None,
}
}
/// Patches map data
pub fn patch_map_data(&mut self, globals: HashMap<&str, &[u8]>) -> Result<(), ParseError> {
let symbols: HashMap<String, &Symbol> = self
.symbols_by_index
.symbol_table
.iter()
.filter(|(_, s)| s.name.is_some())
.map(|(_, s)| (s.name.as_ref().unwrap().clone(), s))
@ -670,12 +680,10 @@ impl Object {
})
}
fn parse_text_section(&mut self, mut section: Section) -> Result<(), ParseError> {
self.text_section_index = Some(section.index.0);
fn parse_text_section(&mut self, section: Section) -> Result<(), ParseError> {
let mut symbols_by_address = HashMap::new();
for sym in self.symbols_by_index.values() {
for sym in self.symbol_table.values() {
if sym.is_definition
&& sym.kind == SymbolKind::Text
&& sym.section_index == Some(section.index.0)
@ -731,7 +739,7 @@ impl Object {
};
self.functions.insert(
sym.address,
(section.index.0, sym.address),
Function {
address,
name: sym.name.clone().unwrap(),
@ -755,7 +763,7 @@ impl Object {
section.index,
section
.relocations
.drain(..)
.into_iter()
.map(|rel| (rel.offset, rel))
.collect(),
);
@ -764,37 +772,6 @@ impl Object {
Ok(())
}
// Parses the legacy `maps` section. The section contains several map
// definitions packed back to back; each symbol in the section marks the
// start and size of one definition.
fn parse_map_section(
    &mut self,
    section: &Section,
    symbols: Vec<Symbol>,
) -> Result<(), ParseError> {
    // Without symbols we can't tell where one map ends and the next begins.
    if symbols.is_empty() {
        return Err(ParseError::NoSymbolsInMapSection {});
    }
    for (i, sym) in symbols.iter().enumerate() {
        // The symbol's address/size delimit this map's definition bytes
        // within the section data.
        let start = sym.address as usize;
        let end = start + sym.size as usize;
        let data = &section.data[start..end];
        let name = sym
            .name
            .as_ref()
            .ok_or(ParseError::MapSymbolNameNotFound { i })?;
        let def = parse_map_def(name, data)?;
        self.maps.insert(
            name.to_string(),
            Map::Legacy(LegacyMap {
                section_index: section.index.0,
                symbol_index: sym.index,
                def,
                // data is only populated for .bss/.data/.rodata maps
                data: Vec::new(),
                kind: MapKind::Other,
            }),
        );
    }
    Ok(())
}
fn parse_btf_maps(
&mut self,
section: &Section,
@ -827,7 +804,6 @@ impl Object {
def,
section_index: section.index.0,
symbol_index,
kind: MapKind::Other,
data: Vec::new(),
}),
);
@ -838,7 +814,7 @@ impl Object {
Ok(())
}
fn parse_section(&mut self, mut section: Section) -> Result<(), ParseError> {
fn parse_section(&mut self, section: Section) -> Result<(), ParseError> {
let mut parts = section.name.rsplitn(2, '/').collect::<Vec<_>>();
parts.reverse();
@ -853,16 +829,16 @@ impl Object {
self.section_sizes
.insert(section.name.to_owned(), section.size);
match section.kind {
BpfSectionKind::Data => {
BpfSectionKind::Data | BpfSectionKind::Rodata | BpfSectionKind::Bss => {
self.maps
.insert(section.name.to_string(), parse_map(&section, section.name)?);
.insert(section.name.to_string(), parse_data_map_section(&section)?);
}
BpfSectionKind::Text => self.parse_text_section(section)?,
BpfSectionKind::Btf => self.parse_btf(&section)?,
BpfSectionKind::BtfExt => self.parse_btf_ext(&section)?,
BpfSectionKind::BtfMaps => {
let symbols: HashMap<String, Symbol> = self
.symbols_by_index
.symbol_table
.values()
.filter(|s| {
if let Some(idx) = s.section_index {
@ -877,19 +853,24 @@ impl Object {
self.parse_btf_maps(&section, symbols)?
}
BpfSectionKind::Maps => {
let symbols: Vec<Symbol> = self
.symbols_by_index
.values()
.filter(|s| {
if let Some(idx) = s.section_index {
idx == section.index.0
} else {
false
}
})
.cloned()
.collect();
self.parse_map_section(&section, symbols)?
// take out self.maps so we can borrow the iterator below
// without cloning or collecting
let mut maps = mem::take(&mut self.maps);
// extract the symbols for the .maps section, we'll need them
// during parsing
let symbols = self.symbol_table.values().filter(|s| {
s.section_index
.map(|idx| idx == section.index.0)
.unwrap_or(false)
});
let res = parse_maps_section(&mut maps, &section, symbols);
// put the maps back
self.maps = maps;
res?
}
BpfSectionKind::Program => {
let program = self.parse_program(&section)?;
@ -900,7 +881,7 @@ impl Object {
section.index,
section
.relocations
.drain(..)
.into_iter()
.map(|rel| (rel.offset, rel))
.collect(),
);
@ -911,6 +892,91 @@ impl Object {
Ok(())
}
/// Sanitize BPF programs.
pub fn sanitize_programs(&mut self, features: &Features) {
for program in self.programs.values_mut() {
program.sanitize(features);
}
}
}
// Returns true if `ins` encodes a call to a BPF helper function: a
// BPF_JMP|BPF_CALL opcode with the BPF_K source flag and both src_reg and
// dst_reg set to zero.
fn insn_is_helper_call(ins: &bpf_insn) -> bool {
    let insn_class = u32::from(ins.code & 0x07);
    let insn_op = u32::from(ins.code & 0xF0);
    let insn_src = u32::from(ins.code & 0x08);

    insn_class == BPF_JMP
        && insn_op == BPF_CALL
        && insn_src == BPF_K
        && ins.src_reg() == 0
        && ins.dst_reg() == 0
}
// BPF helper function IDs used by `Program::sanitize` below to rewrite
// probe_read calls on kernels that lack the newer helpers.
// NOTE(review): values presumably mirror `enum bpf_func_id` in the Linux
// UAPI bpf.h — confirm against the kernel headers.
const BPF_FUNC_PROBE_READ: i32 = 4;
const BPF_FUNC_PROBE_READ_STR: i32 = 45;
const BPF_FUNC_PROBE_READ_USER: i32 = 112;
const BPF_FUNC_PROBE_READ_KERNEL: i32 = 113;
const BPF_FUNC_PROBE_READ_USER_STR: i32 = 114;
const BPF_FUNC_PROBE_READ_KERNEL_STR: i32 = 115;
impl Program {
    /// Rewrites helper calls that the running kernel doesn't support.
    ///
    /// When the `bpf_probe_read_kernel`/`bpf_probe_read_user` helpers (and
    /// their `_str` variants) are unavailable, downgrade those calls to the
    /// legacy `bpf_probe_read`/`bpf_probe_read_str` helpers.
    fn sanitize(&mut self, features: &Features) {
        // Nothing to rewrite when the kernel has the newer helpers.
        if features.bpf_probe_read_kernel {
            return;
        }
        for inst in &mut self.function.instructions {
            if !insn_is_helper_call(inst) {
                continue;
            }
            inst.imm = match inst.imm {
                BPF_FUNC_PROBE_READ_USER | BPF_FUNC_PROBE_READ_KERNEL => BPF_FUNC_PROBE_READ,
                BPF_FUNC_PROBE_READ_USER_STR | BPF_FUNC_PROBE_READ_KERNEL_STR => {
                    BPF_FUNC_PROBE_READ_STR
                }
                imm => imm,
            };
        }
    }
}
// Parses the legacy `maps` section (as opposed to `.maps`, which is
// BTF-defined). The section packs multiple map definitions back to back;
// the symbol table tells us where each one starts and how big it is.
fn parse_maps_section<'a, I: Iterator<Item = &'a Symbol>>(
    maps: &mut HashMap<String, Map>,
    section: &Section,
    symbols: I,
) -> Result<(), ParseError> {
    let mut parsed_any = false;
    // Each symbol in the section marks one map definition.
    for (i, sym) in symbols.enumerate() {
        // The symbol's address/size delimit this map's definition bytes.
        let start = sym.address as usize;
        let end = start + sym.size as usize;
        let def_bytes = &section.data[start..end];
        let map_name = sym
            .name
            .as_ref()
            .ok_or(ParseError::MapSymbolNameNotFound { i })?;
        let def = parse_map_def(map_name, def_bytes)?;
        maps.insert(
            map_name.to_string(),
            Map::Legacy(LegacyMap {
                section_index: section.index.0,
                section_kind: section.kind,
                symbol_index: Some(sym.index),
                def,
                data: Vec::new(),
            }),
        );
        parsed_any = true;
    }
    // Without symbols we can't tell the maps apart, so an empty symbol
    // iterator is an error.
    if !parsed_any {
        return Err(ParseError::NoSymbolsForMapsSection);
    }
    Ok(())
}
/// Errors caught during parsing the object file
@ -976,25 +1042,40 @@ pub enum ParseError {
#[error("the map number {i} in the `maps` section doesn't have a symbol name")]
MapSymbolNameNotFound { i: usize },
#[error("no symbols found for the maps included in the maps section")]
NoSymbolsInMapSection {},
#[error("no symbols for `maps` section, can't parse maps")]
NoSymbolsForMapsSection,
/// No BTF parsed for object
#[error("no BTF parsed for object")]
NoBTF,
}
#[derive(Debug)]
enum BpfSectionKind {
/// The kind of an ELF section.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum BpfSectionKind {
/// Undefined
Undefined,
/// `maps`
Maps,
/// `.maps`
BtfMaps,
/// A program section
Program,
/// `.data`
Data,
/// `.rodata`
Rodata,
/// `.bss`
Bss,
/// `.text`
Text,
/// `.BTF`
Btf,
/// `.BTF.ext`
BtfExt,
/// `license`
License,
/// `version`
Version,
}
@ -1072,6 +1153,7 @@ impl<'data, 'file, 'a> TryFrom<&'a ObjSection<'data, 'file>> for Section<'a> {
_ => return Err(ParseError::UnsupportedRelocationTarget),
},
offset,
size: r.size(),
})
})
.collect::<Result<Vec<_>, _>>()?,
@ -1161,10 +1243,11 @@ impl From<KernelVersion> for u32 {
}
}
fn parse_map(section: &Section, name: &str) -> Result<Map, ParseError> {
let kind = MapKind::from(name);
let (def, data) = match kind {
MapKind::Bss | MapKind::Data | MapKind::Rodata => {
// Parsed '.bss' '.data' and '.rodata' sections. These sections are arrays of
// bytes and are relocated based on their section index.
fn parse_data_map_section(section: &Section) -> Result<Map, ParseError> {
let (def, data) = match section.kind {
BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata => {
let def = bpf_map_def {
map_type: BPF_MAP_TYPE_ARRAY as u32,
key_size: mem::size_of::<u32>() as u32,
@ -1172,7 +1255,7 @@ fn parse_map(section: &Section, name: &str) -> Result<Map, ParseError> {
// .bss will always have data.len() == 0
value_size: section.size as u32,
max_entries: 1,
map_flags: if kind == MapKind::Rodata {
map_flags: if section.kind == BpfSectionKind::Rodata {
BPF_F_RDONLY_PROG
} else {
0
@ -1181,14 +1264,15 @@ fn parse_map(section: &Section, name: &str) -> Result<Map, ParseError> {
};
(def, section.data.to_vec())
}
MapKind::Other => (parse_map_def(name, section.data)?, Vec::new()),
_ => unreachable!(),
};
Ok(Map::Legacy(LegacyMap {
section_index: section.index.0,
symbol_index: 0,
section_kind: section.kind,
// Data maps don't require symbols to be relocated
symbol_index: None,
def,
data,
kind,
}))
}
@ -1308,8 +1392,6 @@ pub fn parse_map_info(info: bpf_map_info, pinned: PinningType) -> Map {
section_index: 0,
symbol_index: 0,
data: Vec::new(),
// We should never be loading the .bss or .data or .rodata FDs
kind: MapKind::Other,
})
} else {
Map::Legacy(LegacyMap {
@ -1323,10 +1405,9 @@ pub fn parse_map_info(info: bpf_map_info, pinned: PinningType) -> Map {
id: info.id,
},
section_index: 0,
symbol_index: 0,
symbol_index: None,
section_kind: BpfSectionKind::Undefined,
data: Vec::new(),
// We should never be loading the .bss or .data or .rodata FDs
kind: MapKind::Other,
})
}
}
@ -1375,8 +1456,8 @@ mod tests {
}
fn fake_sym(obj: &mut Object, section_index: usize, address: u64, name: &str, size: u64) {
let idx = obj.symbols_by_index.len();
obj.symbols_by_index.insert(
let idx = obj.symbol_table.len();
obj.symbol_table.insert(
idx + 1,
Symbol {
index: idx + 1,
@ -1512,65 +1593,21 @@ mod tests {
assert_eq!(parse_map_def("foo", &buf).unwrap(), def);
}
#[test]
fn test_parse_map_error() {
assert!(matches!(
parse_map(&fake_section(BpfSectionKind::Maps, "maps/foo", &[]), "foo",),
Err(ParseError::InvalidMapDefinition { .. })
));
}
#[test]
fn test_parse_map() {
assert!(matches!(
parse_map(
&fake_section(
BpfSectionKind::Maps,
"maps/foo",
bytes_of(&bpf_map_def {
map_type: 1,
key_size: 2,
value_size: 3,
max_entries: 4,
map_flags: 5,
id: 0,
pinning: PinningType::None,
})
),
"foo"
),
Ok(Map::Legacy(LegacyMap{
section_index: 0,
def: bpf_map_def {
map_type: 1,
key_size: 2,
value_size: 3,
max_entries: 4,
map_flags: 5,
id: 0,
pinning: PinningType::None,
},
data,
..
})) if data.is_empty()
))
}
#[test]
fn test_parse_map_data() {
let map_data = b"map data";
assert!(matches!(
parse_map(
parse_data_map_section(
&fake_section(
BpfSectionKind::Data,
".bss",
map_data,
),
".bss"
),
Ok(Map::Legacy(LegacyMap {
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Data,
symbol_index: None,
def: bpf_map_def {
map_type: _map_type,
key_size: 4,
@ -1581,8 +1618,7 @@ mod tests {
pinning: PinningType::None,
},
data,
kind
})) if data == map_data && value_size == map_data.len() as u32 && kind == MapKind::Bss
})) if data == map_data && value_size == map_data.len() as u32
))
}
@ -2253,12 +2289,12 @@ mod tests {
pinning: PinningType::None,
},
section_index: 1,
symbol_index: 1,
section_kind: BpfSectionKind::Rodata,
symbol_index: Some(1),
data: vec![0, 0, 0],
kind: MapKind::Rodata,
}),
);
obj.symbols_by_index.insert(
obj.symbol_table.insert(
1,
Symbol {
index: 1,

@ -1,6 +1,7 @@
//! Program relocation handling.
use core::mem;
use std::collections::HashSet;
use alloc::{borrow::ToOwned, string::String};
use log::debug;
@ -15,6 +16,7 @@ use crate::{
obj::{Function, Object, Program},
thiserror::{self, Error},
util::HashMap,
BpfSectionKind,
};
pub(crate) const INS_SIZE: usize = mem::size_of::<bpf_insn>();
@ -84,6 +86,7 @@ pub enum RelocationError {
pub(crate) struct Relocation {
// byte offset of the instruction to be relocated
pub(crate) offset: u64,
pub(crate) size: u8,
// index of the symbol to relocate to
pub(crate) symbol_index: usize,
}
@ -104,12 +107,15 @@ impl Object {
pub fn relocate_maps<'a, I: Iterator<Item = (&'a str, Option<i32>, &'a Map)>>(
&mut self,
maps: I,
text_sections: &HashSet<usize>,
) -> Result<(), BpfRelocationError> {
let mut maps_by_section = HashMap::new();
let mut maps_by_symbol = HashMap::new();
for (name, fd, map) in maps {
maps_by_section.insert(map.section_index(), (name, fd, map));
maps_by_symbol.insert(map.symbol_index(), (name, fd, map));
if let Some(index) = map.symbol_index() {
maps_by_symbol.insert(index, (name, fd, map));
}
}
let functions = self
@ -125,8 +131,8 @@ impl Object {
relocations.values(),
&maps_by_section,
&maps_by_symbol,
&self.symbols_by_index,
self.text_section_index,
&self.symbol_table,
text_sections,
)
.map_err(|error| BpfRelocationError {
function: function.name.clone(),
@ -139,13 +145,16 @@ impl Object {
}
/// Relocates function calls
pub fn relocate_calls(&mut self) -> Result<(), BpfRelocationError> {
pub fn relocate_calls(
&mut self,
text_sections: &HashSet<usize>,
) -> Result<(), BpfRelocationError> {
for (name, program) in self.programs.iter_mut() {
let linker = FunctionLinker::new(
self.text_section_index,
&self.functions,
&self.relocations,
&self.symbols_by_index,
&self.symbol_table,
text_sections,
);
linker.link(program).map_err(|error| BpfRelocationError {
function: name.to_owned(),
@ -163,7 +172,7 @@ fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
maps_by_section: &HashMap<usize, (&str, Option<i32>, &Map)>,
maps_by_symbol: &HashMap<usize, (&str, Option<i32>, &Map)>,
symbol_table: &HashMap<usize, Symbol>,
text_section_index: Option<usize>,
text_sections: &HashSet<usize>,
) -> Result<(), RelocationError> {
let section_offset = fun.section_offset;
let instructions = &mut fun.instructions;
@ -193,34 +202,54 @@ fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
index: rel.symbol_index,
})?;
let section_index = match sym.section_index {
Some(index) => index,
let Some(section_index) = sym.section_index else {
// this is not a map relocation
None => continue,
continue;
};
// calls and relocation to .text symbols are handled in a separate step
if insn_is_call(&instructions[ins_index]) || sym.section_index == text_section_index {
if insn_is_call(&instructions[ins_index]) || text_sections.contains(&section_index) {
continue;
}
let (name, fd, map) = if maps_by_symbol.contains_key(&rel.symbol_index) {
maps_by_symbol
.get(&rel.symbol_index)
.ok_or(RelocationError::SectionNotFound {
symbol_index: rel.symbol_index,
symbol_name: sym.name.clone(),
section_index,
})?
let (name, fd, map) = if let Some(m) = maps_by_symbol.get(&rel.symbol_index) {
let map = &m.2;
debug!(
"relocating map by symbol index {:?}, kind {:?} at insn {ins_index} in section {}",
map.symbol_index(),
map.section_kind(),
fun.section_index.0
);
debug_assert_eq!(map.symbol_index().unwrap(), rel.symbol_index);
m
} else {
maps_by_section
.get(&section_index)
.ok_or(RelocationError::SectionNotFound {
let Some(m) = maps_by_section.get(&section_index) else {
debug!(
"failed relocating map by section index {}",
section_index
);
return Err(RelocationError::SectionNotFound {
symbol_index: rel.symbol_index,
symbol_name: sym.name.clone(),
section_index,
})?
});
};
let map = &m.2;
debug!(
"relocating map by section index {}, kind {:?} at insn {ins_index} in section {}",
map.section_index(),
map.section_kind(),
fun.section_index.0,
);
debug_assert_eq!(map.symbol_index(), None);
debug_assert!(matches!(
map.section_kind(),
BpfSectionKind::Bss | BpfSectionKind::Data | BpfSectionKind::Rodata
),);
m
};
debug_assert_eq!(map.section_index(), section_index);
let map_fd = fd.ok_or_else(|| RelocationError::MapNotCreated {
name: (*name).into(),
@ -240,26 +269,26 @@ fn relocate_maps<'a, I: Iterator<Item = &'a Relocation>>(
}
struct FunctionLinker<'a> {
text_section_index: Option<usize>,
functions: &'a HashMap<u64, Function>,
functions: &'a HashMap<(usize, u64), Function>,
linked_functions: HashMap<u64, usize>,
relocations: &'a HashMap<SectionIndex, HashMap<u64, Relocation>>,
symbol_table: &'a HashMap<usize, Symbol>,
text_sections: &'a HashSet<usize>,
}
impl<'a> FunctionLinker<'a> {
fn new(
text_section_index: Option<usize>,
functions: &'a HashMap<u64, Function>,
functions: &'a HashMap<(usize, u64), Function>,
relocations: &'a HashMap<SectionIndex, HashMap<u64, Relocation>>,
symbol_table: &'a HashMap<usize, Symbol>,
text_sections: &'a HashSet<usize>,
) -> FunctionLinker<'a> {
FunctionLinker {
text_section_index,
functions,
linked_functions: HashMap::new(),
relocations,
symbol_table,
text_sections,
}
}
@ -289,6 +318,10 @@ impl<'a> FunctionLinker<'a> {
// at `start_ins`. We'll use `start_ins` to do pc-relative calls.
let start_ins = program.instructions.len();
program.instructions.extend(&fun.instructions);
debug!(
"linked function `{}` at instruction {}",
fun.name, start_ins
);
// link func and line info into the main program
// the offset needs to be adjusted
@ -305,101 +338,110 @@ impl<'a> FunctionLinker<'a> {
fn relocate(&mut self, program: &mut Function, fun: &Function) -> Result<(), RelocationError> {
let relocations = self.relocations.get(&fun.section_index);
debug!("relocating program {} function {}", program.name, fun.name);
let n_instructions = fun.instructions.len();
let start_ins = program.instructions.len() - n_instructions;
debug!(
"relocating program `{}` function `{}` size {}",
program.name, fun.name, n_instructions
);
// process all the instructions. We can't only loop over relocations since we need to
// patch pc-relative calls too.
for ins_index in start_ins..start_ins + n_instructions {
let ins = program.instructions[ins_index];
let is_call = insn_is_call(&ins);
// only resolve relocations for calls or for instructions that
// reference symbols in the .text section (eg let callback =
// &some_fun)
let rel = if let Some(relocations) = relocations {
self.text_relocation_info(
relocations,
(fun.section_offset + (ins_index - start_ins) * INS_SIZE) as u64,
)?
// if not a call and not a .text reference, ignore the
// relocation (see relocate_maps())
.and_then(|(_, sym)| {
if is_call {
return Some(sym.address);
}
match sym.kind {
SymbolKind::Text => Some(sym.address),
SymbolKind::Section if sym.section_index == self.text_section_index => {
Some(sym.address + ins.imm as u64)
}
_ => None,
}
let rel = relocations
.and_then(|relocations| {
relocations
.get(&((fun.section_offset + (ins_index - start_ins) * INS_SIZE) as u64))
})
} else {
None
};
.and_then(|rel| {
// get the symbol for the relocation
self.symbol_table
.get(&rel.symbol_index)
.map(|sym| (rel, sym))
})
.filter(|(_rel, sym)| {
// only consider text relocations, data relocations are
// relocated in relocate_maps()
sym.kind == SymbolKind::Text
|| sym
.section_index
.map(|section_index| self.text_sections.contains(&section_index))
.unwrap_or(false)
});
// some_fun() or let x = &some_fun trigger linking, everything else
// can be ignored here
// not a call and not a text relocation, we don't need to do anything
if !is_call && rel.is_none() {
continue;
}
let callee_address = if let Some(address) = rel {
// We have a relocation entry for the instruction at `ins_index`, the address of
// the callee is the address of the relocation's target symbol.
address
let (callee_section_index, callee_address) = if let Some((rel, sym)) = rel {
let address = match sym.kind {
SymbolKind::Text => sym.address,
// R_BPF_64_32 this is a call
SymbolKind::Section if rel.size == 32 => {
sym.address + (ins.imm + 1) as u64 * INS_SIZE as u64
}
// R_BPF_64_64 this is a ld_imm64 text relocation
SymbolKind::Section if rel.size == 64 => sym.address + ins.imm as u64,
_ => todo!(), // FIXME: return an error here,
};
(sym.section_index.unwrap(), address)
} else {
// The caller and the callee are in the same ELF section and this is a pc-relative
// call. Resolve the pc-relative imm to an absolute address.
let ins_size = INS_SIZE as i64;
(
fun.section_index.0,
(fun.section_offset as i64
+ ((ins_index - start_ins) as i64) * ins_size
+ (ins.imm + 1) as i64 * ins_size) as u64
+ (ins.imm + 1) as i64 * ins_size) as u64,
)
};
debug!(
"relocating {} to callee address {} ({})",
"relocating {} to callee address {:#x} in section {} ({}) at instruction {ins_index}",
if is_call { "call" } else { "reference" },
callee_address,
callee_section_index,
if rel.is_some() {
"relocation"
} else {
"relative"
"pc-relative"
},
);
// lookup and link the callee if it hasn't been linked already. `callee_ins_index` will
// contain the instruction index of the callee inside the program.
let callee =
self.functions
.get(&callee_address)
let callee = self
.functions
.get(&(callee_section_index, callee_address))
.ok_or(RelocationError::UnknownFunction {
address: callee_address,
caller_name: fun.name.clone(),
})?;
debug!("callee is {}", callee.name);
debug!("callee is `{}`", callee.name);
let callee_ins_index = self.link_function(program, callee)?;
let callee_ins_index = self.link_function(program, callee)? as i32;
let mut ins = &mut program.instructions[ins_index];
ins.imm = if callee_ins_index < ins_index {
-((ins_index - callee_ins_index + 1) as i32)
} else {
(callee_ins_index - ins_index - 1) as i32
};
let ins_index = ins_index as i32;
ins.imm = callee_ins_index - ins_index - 1;
debug!(
"callee `{}` is at ins {callee_ins_index}, {} from current instruction {ins_index}",
callee.name, ins.imm
);
if !is_call {
ins.set_src_reg(BPF_PSEUDO_FUNC as u8);
}
}
debug!(
"finished relocating program {} function {}",
"finished relocating program `{}` function `{}`",
program.name, fun.name
);
@ -438,25 +480,6 @@ impl<'a> FunctionLinker<'a> {
}
Ok(())
}
// Looks up the relocation recorded at `offset` (if any) and resolves the
// symbol it points at. Returns `Ok(None)` when no relocation exists at that
// offset, and an error when the relocation references an unknown symbol.
fn text_relocation_info(
    &self,
    relocations: &HashMap<u64, Relocation>,
    offset: u64,
) -> Result<Option<(Relocation, Symbol)>, RelocationError> {
    let Some(rel) = relocations.get(&offset) else {
        return Ok(None);
    };
    let sym = self
        .symbol_table
        .get(&rel.symbol_index)
        .ok_or(RelocationError::UnknownSymbol {
            index: rel.symbol_index,
        })?;
    Ok(Some((*rel, sym.clone())))
}
}
fn insn_is_call(ins: &bpf_insn) -> bool {
@ -476,7 +499,10 @@ fn insn_is_call(ins: &bpf_insn) -> bool {
mod test {
use alloc::{string::ToString, vec, vec::Vec};
use crate::maps::{bpf_map_def, BtfMap, BtfMapDef, LegacyMap, Map, MapKind};
use crate::{
maps::{BtfMap, LegacyMap, Map},
BpfSectionKind,
};
use super::*;
@ -498,25 +524,20 @@ mod test {
fn fake_legacy_map(symbol_index: usize) -> Map {
Map::Legacy(LegacyMap {
def: bpf_map_def {
..Default::default()
},
def: Default::default(),
section_index: 0,
symbol_index,
section_kind: BpfSectionKind::Undefined,
symbol_index: Some(symbol_index),
data: Vec::new(),
kind: MapKind::Other,
})
}
fn fake_btf_map(symbol_index: usize) -> Map {
Map::Btf(BtfMap {
def: BtfMapDef {
..Default::default()
},
def: Default::default(),
section_index: 0,
symbol_index,
data: Vec::new(),
kind: MapKind::Other,
})
}
@ -549,6 +570,7 @@ mod test {
let relocations = vec![Relocation {
offset: 0x0,
symbol_index: 1,
size: 64,
}];
let maps_by_section = HashMap::new();
@ -561,7 +583,7 @@ mod test {
&maps_by_section,
&maps_by_symbol,
&symbol_table,
None,
&HashSet::new(),
)
.unwrap();
@ -596,10 +618,12 @@ mod test {
Relocation {
offset: 0x0,
symbol_index: 1,
size: 64,
},
Relocation {
offset: mem::size_of::<bpf_insn>() as u64,
symbol_index: 2,
size: 64,
},
];
let maps_by_section = HashMap::new();
@ -617,7 +641,7 @@ mod test {
&maps_by_section,
&maps_by_symbol,
&symbol_table,
None,
&HashSet::new(),
)
.unwrap();
@ -646,6 +670,7 @@ mod test {
let relocations = vec![Relocation {
offset: 0x0,
symbol_index: 1,
size: 64,
}];
let maps_by_section = HashMap::new();
@ -658,7 +683,7 @@ mod test {
&maps_by_section,
&maps_by_symbol,
&symbol_table,
None,
&HashSet::new(),
)
.unwrap();
@ -693,10 +718,12 @@ mod test {
Relocation {
offset: 0x0,
symbol_index: 1,
size: 64,
},
Relocation {
offset: mem::size_of::<bpf_insn>() as u64,
symbol_index: 2,
size: 64,
},
];
let maps_by_section = HashMap::new();
@ -714,7 +741,7 @@ mod test {
&maps_by_section,
&maps_by_symbol,
&symbol_table,
None,
&HashSet::new(),
)
.unwrap();

@ -11,6 +11,7 @@ use aya_obj::{
btf::{BtfFeatures, BtfRelocationError},
generated::{BPF_F_SLEEPABLE, BPF_F_XDP_HAS_FRAGS},
relocation::BpfRelocationError,
BpfSectionKind, Features,
};
use log::debug;
use thiserror::Error;
@ -23,7 +24,6 @@ use crate::{
maps::{Map, MapData, MapError},
obj::{
btf::{Btf, BtfError},
maps::MapKind,
Object, ParseError, ProgramSection,
},
programs::{
@ -36,7 +36,7 @@ use crate::{
bpf_load_btf, bpf_map_freeze, bpf_map_update_elem_ptr, is_btf_datasec_supported,
is_btf_decl_tag_supported, is_btf_float_supported, is_btf_func_global_supported,
is_btf_func_supported, is_btf_supported, is_btf_type_tag_supported, is_perf_link_supported,
is_prog_name_supported, retry_with_verifier_logs,
is_probe_read_kernel_supported, is_prog_name_supported, retry_with_verifier_logs,
},
util::{bytes_of, bytes_of_slice, possible_cpus, VerifierLog, POSSIBLE_CPUS},
};
@ -66,19 +66,10 @@ unsafe impl<T: Pod, const N: usize> Pod for [T; N] {}
pub use aya_obj::maps::{bpf_map_def, PinningType};
lazy_static! {
pub(crate) static ref FEATURES: Features = Features::new();
pub(crate) static ref FEATURES: Features = detect_features();
}
// Features implements BPF and BTF feature detection
#[derive(Default, Debug)]
pub(crate) struct Features {
pub bpf_name: bool,
pub bpf_perf_link: bool,
pub btf: Option<BtfFeatures>,
}
impl Features {
fn new() -> Self {
fn detect_features() -> Features {
let btf = if is_btf_supported() {
Some(BtfFeatures {
btf_func: is_btf_func_supported(),
@ -93,12 +84,12 @@ impl Features {
};
let f = Features {
bpf_name: is_prog_name_supported(),
bpf_probe_read_kernel: is_probe_read_kernel_supported(),
bpf_perf_link: is_perf_link_supported(),
btf,
};
debug!("BPF Feature Detection: {:#?}", f);
f
}
}
/// Builder style API for advanced loading of eBPF programs.
@ -415,14 +406,14 @@ impl<'a> BpfLoader<'a> {
}
PinningType::None => map.create(&name)?,
};
if !map.obj.data().is_empty() && map.obj.kind() != MapKind::Bss {
if !map.obj.data().is_empty() && map.obj.section_kind() != BpfSectionKind::Bss {
bpf_map_update_elem_ptr(fd, &0 as *const _, map.obj.data_mut().as_mut_ptr(), 0)
.map_err(|(_, io_error)| MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(),
io_error,
})?;
}
if map.obj.kind() == MapKind::Rodata {
if map.obj.section_kind() == BpfSectionKind::Rodata {
bpf_map_freeze(fd).map_err(|(_, io_error)| MapError::SyscallError {
call: "bpf_map_freeze".to_owned(),
io_error,
@ -431,11 +422,19 @@ impl<'a> BpfLoader<'a> {
maps.insert(name, map);
}
let text_sections = obj
.functions
.keys()
.map(|(section_index, _)| *section_index)
.collect();
obj.relocate_maps(
maps.iter()
.map(|(s, data)| (s.as_str(), data.fd, &data.obj)),
&text_sections,
)?;
obj.relocate_calls()?;
obj.relocate_calls(&text_sections)?;
obj.sanitize_programs(&FEATURES);
let programs = obj
.programs

@ -1,6 +1,5 @@
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -35,9 +34,9 @@ pub struct Array<T, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
impl<T: Borrow<MapData>, V: Pod> Array<T, V> {
pub(crate) fn new(map: T) -> Result<Array<T, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<u32, V>(data)?;
let _fd = data.fd_or_err()?;
@ -52,7 +51,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.as_ref().obj.max_entries()
self.inner.borrow().obj.max_entries()
}
/// Returns the value stored at the given index.
@ -62,7 +61,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: &u32, flags: u64) -> Result<V, MapError> {
let data = self.inner.as_ref();
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd_or_err()?;
@ -82,7 +81,7 @@ impl<T: AsRef<MapData>, V: Pod> Array<T, V> {
}
}
impl<T: AsMut<MapData>, V: Pod> Array<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> Array<T, V> {
/// Sets the value of the element at the given index.
///
/// # Errors
@ -90,7 +89,7 @@ impl<T: AsMut<MapData>, V: Pod> Array<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd_or_err()?;
bpf_map_update_elem(fd, Some(&index), value.borrow(), flags).map_err(|(_, io_error)| {
@ -103,9 +102,9 @@ impl<T: AsMut<MapData>, V: Pod> Array<T, V> {
}
}
impl<T: AsRef<MapData>, V: Pod> IterableMap<u32, V> for Array<T, V> {
impl<T: Borrow<MapData>, V: Pod> IterableMap<u32, V> for Array<T, V> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, index: &u32) -> Result<V, MapError> {

@ -1,5 +1,5 @@
use std::{
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -53,9 +53,9 @@ pub struct PerCpuArray<T, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
impl<T: Borrow<MapData>, V: Pod> PerCpuArray<T, V> {
pub(crate) fn new(map: T) -> Result<PerCpuArray<T, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<u32, V>(data)?;
let _fd = data.fd_or_err()?;
@ -70,7 +70,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn len(&self) -> u32 {
self.inner.as_ref().obj.max_entries()
self.inner.borrow().obj.max_entries()
}
/// Returns a slice of values - one for each CPU - stored at the given index.
@ -80,7 +80,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_lookup_elem` fails.
pub fn get(&self, index: &u32, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let data = self.inner.as_ref();
let data = self.inner.borrow();
check_bounds(data, *index)?;
let fd = data.fd_or_err()?;
@ -100,7 +100,7 @@ impl<T: AsRef<MapData>, V: Pod> PerCpuArray<T, V> {
}
}
impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> PerCpuArray<T, V> {
/// Sets the values - one for each CPU - at the given index.
///
/// # Errors
@ -108,7 +108,7 @@ impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> {
/// Returns [`MapError::OutOfBounds`] if `index` is out of bounds, [`MapError::SyscallError`]
/// if `bpf_map_update_elem` fails.
pub fn set(&mut self, index: u32, values: PerCpuValues<V>, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd_or_err()?;
@ -122,9 +122,9 @@ impl<T: AsMut<MapData>, V: Pod> PerCpuArray<T, V> {
}
}
impl<T: AsRef<MapData>, V: Pod> IterableMap<u32, PerCpuValues<V>> for PerCpuArray<T, V> {
impl<T: Borrow<MapData>, V: Pod> IterableMap<u32, PerCpuValues<V>> for PerCpuArray<T, V> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, index: &u32) -> Result<PerCpuValues<V>, MapError> {

@ -1,7 +1,7 @@
//! An array of eBPF program file descriptors used as a jump table.
use std::{
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
os::unix::prelude::{AsRawFd, RawFd},
};
@ -51,9 +51,9 @@ pub struct ProgramArray<T> {
inner: T,
}
impl<T: AsRef<MapData>> ProgramArray<T> {
impl<T: Borrow<MapData>> ProgramArray<T> {
pub(crate) fn new(map: T) -> Result<ProgramArray<T>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
let _fd = data.fd_or_err()?;
@ -64,17 +64,17 @@ impl<T: AsRef<MapData>> ProgramArray<T> {
/// An iterator over the indices of the array that point to a program. The iterator item type
/// is `Result<u32, MapError>`.
pub fn indices(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
}
impl<T: AsMut<MapData>> ProgramArray<T> {
impl<T: BorrowMut<MapData>> ProgramArray<T> {
/// Sets the target program file descriptor for the given index in the jump table.
///
/// When an eBPF program calls `bpf_tail_call(ctx, prog_array, index)`, control
/// flow will jump to `program`.
pub fn set(&mut self, index: u32, program: ProgramFd, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
check_bounds(data, index)?;
let fd = data.fd_or_err()?;
let prog_fd = program.as_raw_fd();
@ -93,9 +93,9 @@ impl<T: AsMut<MapData>> ProgramArray<T> {
/// Calling `bpf_tail_call(ctx, prog_array, index)` on an index that has been cleared returns an
/// error.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
check_bounds(data, *index)?;
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow_mut().fd_or_err()?;
bpf_map_delete_elem(fd, index)
.map(|_| ())

@ -1,5 +1,5 @@
//! A Bloom Filter.
use std::{borrow::Borrow, convert::AsRef, marker::PhantomData};
use std::{borrow::Borrow, marker::PhantomData};
use crate::{
maps::{check_v_size, MapData, MapError},
@ -35,9 +35,9 @@ pub struct BloomFilter<T, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> {
impl<T: Borrow<MapData>, V: Pod> BloomFilter<T, V> {
pub(crate) fn new(map: T) -> Result<BloomFilter<T, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_v_size::<V>(data)?;
let _ = data.fd_or_err()?;
@ -50,7 +50,7 @@ impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> {
/// Query the existence of the element.
pub fn contains(&self, mut value: &V, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_lookup_elem_ptr::<u32, _>(fd, None, &mut value, flags)
.map_err(|(_, io_error)| MapError::SyscallError {
@ -63,7 +63,7 @@ impl<T: AsRef<MapData>, V: Pod> BloomFilter<T, V> {
/// Inserts a value into the map.
pub fn insert(&self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_push_elem".to_owned(),
@ -84,10 +84,7 @@ mod tests {
bpf_map_type::{BPF_MAP_TYPE_BLOOM_FILTER, BPF_MAP_TYPE_PERF_EVENT_ARRAY},
},
maps::{Map, MapData},
obj::{
self,
maps::{LegacyMap, MapKind},
},
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, SysResult, Syscall},
};
use libc::{EFAULT, ENOENT};
@ -103,9 +100,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
kind: MapKind::Other,
})
}
@ -142,9 +139,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
kind: MapKind::Other,
}),
fd: None,
pinned: false,

@ -1,6 +1,5 @@
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -39,9 +38,9 @@ pub struct HashMap<T, K, V> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<HashMap<T, K, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<K, V>(data)?;
let _ = data.fd_or_err()?;
@ -54,7 +53,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Returns a copy of the value associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(),
@ -73,11 +72,11 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
}
impl<T: AsMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
impl<T: BorrowMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
/// Inserts a key-value pair into the map.
pub fn insert(
&mut self,
@ -85,18 +84,18 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> HashMap<T, K, V> {
value: impl Borrow<V>,
flags: u64,
) -> Result<(), MapError> {
hash_map::insert(self.inner.as_mut(), key.borrow(), value.borrow(), flags)
hash_map::insert(self.inner.borrow_mut(), key.borrow(), value.borrow(), flags)
}
/// Removes a key from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key)
hash_map::remove(self.inner.borrow_mut(), key)
}
}
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<K, V> for HashMap<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, V> for HashMap<T, K, V> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, key: &K) -> Result<V, MapError> {
@ -117,10 +116,7 @@ mod tests {
bpf_map_type::{BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_LRU_HASH},
},
maps::{Map, MapData},
obj::{
self,
maps::{LegacyMap, MapKind},
},
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, SysResult, Syscall},
};
@ -136,9 +132,9 @@ mod tests {
..Default::default()
},
section_index: 0,
section_kind: BpfSectionKind::Maps,
data: Vec::new(),
kind: MapKind::Other,
symbol_index: 0,
symbol_index: None,
})
}
@ -267,9 +263,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
kind: MapKind::Other,
}),
fd: Some(42),
pinned: false,

@ -1,7 +1,6 @@
//! Per-CPU hash map.
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -48,9 +47,9 @@ pub struct PerCpuHashMap<T, K: Pod, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
pub(crate) fn new(map: T) -> Result<PerCpuHashMap<T, K, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<K, V>(data)?;
let _ = data.fd_or_err()?;
@ -64,7 +63,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Returns a slice of values - one for each CPU - associated with the key.
pub fn get(&self, key: &K, flags: u64) -> Result<PerCpuValues<V>, MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let values = bpf_map_lookup_elem_per_cpu(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(),
@ -83,11 +82,11 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
}
impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
impl<T: BorrowMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Inserts a slice of values - one for each CPU - for the given key.
///
/// # Examples
@ -122,7 +121,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
values: PerCpuValues<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow_mut().fd_or_err()?;
bpf_map_update_elem_per_cpu(fd, key.borrow(), &values, flags).map_err(
|(_, io_error)| MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(),
@ -135,13 +134,15 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> PerCpuHashMap<T, K, V> {
/// Removes a key from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key)
hash_map::remove(self.inner.borrow_mut(), key)
}
}
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<K, PerCpuValues<V>> for PerCpuHashMap<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<K, PerCpuValues<V>>
for PerCpuHashMap<T, K, V>
{
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, key: &K) -> Result<PerCpuValues<V>, MapError> {

@ -1,7 +1,6 @@
//! A LPM Trie.
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -99,9 +98,9 @@ impl<K: Pod> Clone for Key<K> {
// A Pod impl is required as Key struct is a key for a map.
unsafe impl<K: Pod> Pod for Key<K> {}
impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
pub(crate) fn new(map: T) -> Result<LpmTrie<T, K, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<Key<K>, V>(data)?;
let _ = data.fd_or_err()?;
@ -115,7 +114,7 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Returns a copy of the value associated with the longest prefix matching key in the LpmTrie.
pub fn get(&self, key: &Key<K>, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(),
@ -134,17 +133,17 @@ impl<T: AsRef<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<Key<K>, MapError>`.
pub fn keys(&self) -> MapKeys<'_, Key<K>> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
/// An iterator visiting all keys matching key. The
/// iterator item type is `Result<Key<K>, MapError>`.
pub fn iter_key(&self, key: Key<K>) -> LpmTrieKeys<'_, K> {
LpmTrieKeys::new(self.inner.as_ref(), key)
LpmTrieKeys::new(self.inner.borrow(), key)
}
}
impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
impl<T: BorrowMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
/// Inserts a key value pair into the map.
pub fn insert(
&mut self,
@ -152,7 +151,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
value: impl Borrow<V>,
flags: u64,
) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_update_elem(fd, Some(key), value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(),
@ -167,7 +166,7 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
///
/// Both the prefix and data must match exactly - this method does not do a longest prefix match.
pub fn remove(&mut self, key: &Key<K>) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_delete_elem(fd, key)
.map(|_| ())
.map_err(|(_, io_error)| MapError::SyscallError {
@ -177,9 +176,9 @@ impl<T: AsMut<MapData>, K: Pod, V: Pod> LpmTrie<T, K, V> {
}
}
impl<T: AsRef<MapData>, K: Pod, V: Pod> IterableMap<Key<K>, V> for LpmTrie<T, K, V> {
impl<T: Borrow<MapData>, K: Pod, V: Pod> IterableMap<Key<K>, V> for LpmTrie<T, K, V> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, key: &Key<K>) -> Result<V, MapError> {
@ -247,10 +246,7 @@ mod tests {
bpf_map_type::{BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_PERF_EVENT_ARRAY},
},
maps::{Map, MapData},
obj::{
self,
maps::{LegacyMap, MapKind},
},
obj::{self, maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, SysResult, Syscall},
};
use libc::{EFAULT, ENOENT};
@ -266,9 +262,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
kind: MapKind::Other,
})
}
@ -322,9 +318,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: None,
data: Vec::new(),
kind: MapKind::Other,
}),
fd: None,
btf_fd: None,

@ -37,7 +37,6 @@
//! versa. Because of that, all map values must be plain old data and therefore
//! implement the [Pod] trait.
use std::{
convert::{AsMut, AsRef},
ffi::CString,
fmt, io,
marker::PhantomData,
@ -481,18 +480,6 @@ pub struct MapData {
pub pinned: bool,
}
impl AsRef<MapData> for MapData {
fn as_ref(&self) -> &MapData {
self
}
}
impl AsMut<MapData> for MapData {
fn as_mut(&mut self) -> &mut MapData {
self
}
}
impl MapData {
/// Creates a new map with the provided `name`
pub fn create(&mut self, name: &str) -> Result<RawFd, MapError> {
@ -845,7 +832,7 @@ mod tests {
bpf_map_def,
generated::{bpf_cmd, bpf_map_type::BPF_MAP_TYPE_HASH},
maps::MapData,
obj::maps::{LegacyMap, MapKind},
obj::{maps::LegacyMap, BpfSectionKind},
sys::{override_syscall, Syscall},
};
@ -861,9 +848,9 @@ mod tests {
..Default::default()
},
section_index: 0,
symbol_index: 0,
section_kind: BpfSectionKind::Maps,
symbol_index: Some(0),
data: Vec::new(),
kind: MapKind::Other,
})
}

@ -1,6 +1,6 @@
use bytes::BytesMut;
use std::{
convert::AsMut,
borrow::{Borrow, BorrowMut},
os::unix::prelude::{AsRawFd, RawFd},
};
@ -89,7 +89,7 @@ pub struct AsyncPerfEventArray<T> {
perf_map: PerfEventArray<T>,
}
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArray<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArray<T> {
/// Opens the perf buffer at the given index.
///
/// The returned buffer will receive all the events eBPF programs send at the given index.
@ -112,7 +112,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArray<T> {
}
}
impl<T: AsRef<MapData>> AsyncPerfEventArray<T> {
impl<T: Borrow<MapData>> AsyncPerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<AsyncPerfEventArray<T>, MapError> {
Ok(AsyncPerfEventArray {
perf_map: PerfEventArray::new(map)?,
@ -138,7 +138,7 @@ pub struct AsyncPerfEventArrayBuffer<T> {
}
#[cfg(any(feature = "async_tokio"))]
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer.
///
/// This method reads events into the provided slice of buffers, filling
@ -168,7 +168,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> {
}
#[cfg(all(not(feature = "async_tokio"), feature = "async_std"))]
impl<T: AsMut<MapData> + AsRef<MapData>> AsyncPerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsyncPerfEventArrayBuffer<T> {
/// Reads events from the buffer.
///
/// This method reads events into the provided slice of buffers, filling

@ -2,7 +2,7 @@
//!
//! [`perf`]: https://perf.wiki.kernel.org/index.php/Main_Page.
use std::{
convert::AsMut,
borrow::{Borrow, BorrowMut},
ops::Deref,
os::unix::io::{AsRawFd, RawFd},
sync::Arc,
@ -31,7 +31,7 @@ pub struct PerfEventArrayBuffer<T> {
buf: PerfBuffer,
}
impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArrayBuffer<T> {
/// Returns true if the buffer contains events that haven't been read.
pub fn readable(&self) -> bool {
self.buf.readable()
@ -55,7 +55,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArrayBuffer<T> {
}
}
impl<T: AsMut<MapData> + AsRef<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
fn as_raw_fd(&self) -> RawFd {
self.buf.as_raw_fd()
}
@ -84,14 +84,14 @@ impl<T: AsMut<MapData> + AsRef<MapData>> AsRawFd for PerfEventArrayBuffer<T> {
/// ```no_run
/// # use aya::maps::perf::PerfEventArrayBuffer;
/// # use aya::maps::MapData;
/// # use std::convert::AsMut;
/// # use std::borrow::BorrowMut;
/// # struct Poll<T> { _t: std::marker::PhantomData<T> };
/// # impl<T: AsMut<MapData>> Poll<T> {
/// # impl<T: BorrowMut<MapData>> Poll<T> {
/// # fn poll_readable(&self) -> &mut [PerfEventArrayBuffer<T>] {
/// # &mut []
/// # }
/// # }
/// # fn poll_buffers<T: AsMut<MapData>>(bufs: Vec<PerfEventArrayBuffer<T>>) -> Poll<T> {
/// # fn poll_buffers<T: BorrowMut<MapData>>(bufs: Vec<PerfEventArrayBuffer<T>>) -> Poll<T> {
/// # Poll { _t: std::marker::PhantomData }
/// # }
/// # #[derive(thiserror::Error, Debug)]
@ -160,9 +160,9 @@ pub struct PerfEventArray<T> {
page_size: usize,
}
impl<T: AsRef<MapData>> PerfEventArray<T> {
impl<T: Borrow<MapData>> PerfEventArray<T> {
pub(crate) fn new(map: T) -> Result<PerfEventArray<T>, MapError> {
let _fd = map.as_ref().fd_or_err()?;
let _fd = map.borrow().fd_or_err()?;
Ok(PerfEventArray {
map: Arc::new(map),
@ -171,7 +171,7 @@ impl<T: AsRef<MapData>> PerfEventArray<T> {
}
}
impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArray<T> {
impl<T: BorrowMut<MapData> + Borrow<MapData>> PerfEventArray<T> {
/// Opens the perf buffer at the given index.
///
/// The returned buffer will receive all the events eBPF programs send at the given index.
@ -183,7 +183,7 @@ impl<T: AsMut<MapData> + AsRef<MapData>> PerfEventArray<T> {
// FIXME: keep track of open buffers
// this cannot fail as new() checks that the fd is open
let map_data: &MapData = self.map.deref().as_ref();
let map_data: &MapData = self.map.deref().borrow();
let map_fd = map_data.fd_or_err().unwrap();
let buf = PerfBuffer::open(index, self.page_size, page_count.unwrap_or(2))?;
bpf_map_update_elem(map_fd, Some(&index), &buf.as_raw_fd(), 0)

@ -1,7 +1,6 @@
//! A FIFO queue.
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -34,9 +33,9 @@ pub struct Queue<T, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, V: Pod> Queue<T, V> {
impl<T: Borrow<MapData>, V: Pod> Queue<T, V> {
pub(crate) fn new(map: T) -> Result<Queue<T, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<(), V>(data)?;
let _fd = data.fd_or_err()?;
@ -51,11 +50,11 @@ impl<T: AsRef<MapData>, V: Pod> Queue<T, V> {
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn capacity(&self) -> u32 {
self.inner.as_ref().obj.max_entries()
self.inner.borrow().obj.max_entries()
}
}
impl<T: AsMut<MapData>, V: Pod> Queue<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> Queue<T, V> {
/// Removes the first element and returns it.
///
/// # Errors
@ -63,7 +62,7 @@ impl<T: AsMut<MapData>, V: Pod> Queue<T, V> {
/// Returns [`MapError::ElementNotFound`] if the queue is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| MapError::SyscallError {
@ -80,7 +79,7 @@ impl<T: AsMut<MapData>, V: Pod> Queue<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_push_elem(fd, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_push_elem".to_owned(),

@ -1,6 +1,5 @@
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
os::unix::io::{AsRawFd, RawFd},
};
@ -69,9 +68,9 @@ pub struct SockHash<T, K> {
_k: PhantomData<K>,
}
impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
impl<T: Borrow<MapData>, K: Pod> SockHash<T, K> {
pub(crate) fn new(map: T) -> Result<SockHash<T, K>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<K, u32>(data)?;
let _ = data.fd_or_err()?;
@ -83,7 +82,7 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// Returns the fd of the socket stored at the given key.
pub fn get(&self, key: &K, flags: u64) -> Result<RawFd, MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_elem(fd, key, flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_lookup_elem".to_owned(),
@ -102,7 +101,7 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// An iterator visiting all keys in arbitrary order. The iterator element
/// type is `Result<K, MapError>`.
pub fn keys(&self) -> MapKeys<'_, K> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
/// Returns the map's file descriptor.
@ -110,11 +109,11 @@ impl<T: AsRef<MapData>, K: Pod> SockHash<T, K> {
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.as_ref().fd_or_err()?))
Ok(SockMapFd(self.inner.borrow().fd_or_err()?))
}
}
impl<T: AsMut<MapData>, K: Pod> SockHash<T, K> {
impl<T: BorrowMut<MapData>, K: Pod> SockHash<T, K> {
/// Inserts a socket under the given key.
pub fn insert<I: AsRawFd>(
&mut self,
@ -122,18 +121,23 @@ impl<T: AsMut<MapData>, K: Pod> SockHash<T, K> {
value: I,
flags: u64,
) -> Result<(), MapError> {
hash_map::insert(self.inner.as_mut(), key.borrow(), &value.as_raw_fd(), flags)
hash_map::insert(
self.inner.borrow_mut(),
key.borrow(),
&value.as_raw_fd(),
flags,
)
}
/// Removes a socket from the map.
pub fn remove(&mut self, key: &K) -> Result<(), MapError> {
hash_map::remove(self.inner.as_mut(), key)
hash_map::remove(self.inner.borrow_mut(), key)
}
}
impl<T: AsRef<MapData>, K: Pod> IterableMap<K, RawFd> for SockHash<T, K> {
impl<T: Borrow<MapData>, K: Pod> IterableMap<K, RawFd> for SockHash<T, K> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, key: &K) -> Result<RawFd, MapError> {

@ -1,7 +1,7 @@
//! An array of eBPF program file descriptors used as a jump table.
use std::{
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
os::unix::{io::AsRawFd, prelude::RawFd},
};
@ -44,9 +44,9 @@ pub struct SockMap<T> {
pub(crate) inner: T,
}
impl<T: AsRef<MapData>> SockMap<T> {
impl<T: Borrow<MapData>> SockMap<T> {
pub(crate) fn new(map: T) -> Result<SockMap<T>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<u32, RawFd>(data)?;
let _fd = data.fd_or_err()?;
@ -57,7 +57,7 @@ impl<T: AsRef<MapData>> SockMap<T> {
/// An iterator over the indices of the array that point to a program. The iterator item type
/// is `Result<u32, MapError>`.
pub fn indices(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
/// Returns the map's file descriptor.
@ -65,14 +65,14 @@ impl<T: AsRef<MapData>> SockMap<T> {
/// The returned file descriptor can be used to attach programs that work with
/// socket maps, like [`SkMsg`](crate::programs::SkMsg) and [`SkSkb`](crate::programs::SkSkb).
pub fn fd(&self) -> Result<SockMapFd, MapError> {
Ok(SockMapFd(self.inner.as_ref().fd_or_err()?))
Ok(SockMapFd(self.inner.borrow().fd_or_err()?))
}
}
impl<T: AsMut<MapData>> SockMap<T> {
impl<T: BorrowMut<MapData>> SockMap<T> {
/// Stores a socket into the map.
pub fn set<I: AsRawFd>(&mut self, index: u32, socket: &I, flags: u64) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
let fd = data.fd_or_err()?;
check_bounds(data, index)?;
bpf_map_update_elem(fd, Some(&index), &socket.as_raw_fd(), flags).map_err(
@ -86,7 +86,7 @@ impl<T: AsMut<MapData>> SockMap<T> {
/// Removes the socket stored at `index` from the map.
pub fn clear_index(&mut self, index: &u32) -> Result<(), MapError> {
let data = self.inner.as_mut();
let data = self.inner.borrow_mut();
let fd = data.fd_or_err()?;
check_bounds(data, *index)?;
bpf_map_delete_elem(fd, index)

@ -1,7 +1,6 @@
//! A LIFO stack.
use std::{
borrow::Borrow,
convert::{AsMut, AsRef},
borrow::{Borrow, BorrowMut},
marker::PhantomData,
};
@ -34,9 +33,9 @@ pub struct Stack<T, V: Pod> {
_v: PhantomData<V>,
}
impl<T: AsRef<MapData>, V: Pod> Stack<T, V> {
impl<T: Borrow<MapData>, V: Pod> Stack<T, V> {
pub(crate) fn new(map: T) -> Result<Stack<T, V>, MapError> {
let data = map.as_ref();
let data = map.borrow();
check_kv_size::<(), V>(data)?;
let _fd = data.fd_or_err()?;
@ -51,11 +50,11 @@ impl<T: AsRef<MapData>, V: Pod> Stack<T, V> {
///
/// This corresponds to the value of `bpf_map_def::max_entries` on the eBPF side.
pub fn capacity(&self) -> u32 {
self.inner.as_ref().obj.max_entries()
self.inner.borrow().obj.max_entries()
}
}
impl<T: AsMut<MapData>, V: Pod> Stack<T, V> {
impl<T: BorrowMut<MapData>, V: Pod> Stack<T, V> {
/// Removes the last element and returns it.
///
/// # Errors
@ -63,7 +62,7 @@ impl<T: AsMut<MapData>, V: Pod> Stack<T, V> {
/// Returns [`MapError::ElementNotFound`] if the stack is empty, [`MapError::SyscallError`]
/// if `bpf_map_lookup_and_delete_elem` fails.
pub fn pop(&mut self, flags: u64) -> Result<V, MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let value = bpf_map_lookup_and_delete_elem::<u32, _>(fd, None, flags).map_err(
|(_, io_error)| MapError::SyscallError {
@ -80,7 +79,7 @@ impl<T: AsMut<MapData>, V: Pod> Stack<T, V> {
///
/// [`MapError::SyscallError`] if `bpf_map_update_elem` fails.
pub fn push(&mut self, value: impl Borrow<V>, flags: u64) -> Result<(), MapError> {
let fd = self.inner.as_mut().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
bpf_map_update_elem(fd, None::<&u32>, value.borrow(), flags).map_err(|(_, io_error)| {
MapError::SyscallError {
call: "bpf_map_update_elem".to_owned(),

@ -1,7 +1,7 @@
//! A hash map of kernel or user space stack traces.
//!
//! See [`StackTraceMap`] for documentation and examples.
use std::{collections::BTreeMap, convert::AsRef, fs, io, mem, path::Path, str::FromStr};
use std::{borrow::Borrow, collections::BTreeMap, fs, io, mem, path::Path, str::FromStr};
use crate::{
maps::{IterableMap, MapData, MapError, MapIter, MapKeys},
@ -67,9 +67,9 @@ pub struct StackTraceMap<T> {
max_stack_depth: usize,
}
impl<T: AsRef<MapData>> StackTraceMap<T> {
impl<T: Borrow<MapData>> StackTraceMap<T> {
pub(crate) fn new(map: T) -> Result<StackTraceMap<T>, MapError> {
let data = map.as_ref();
let data = map.borrow();
let expected = mem::size_of::<u32>();
let size = data.obj.key_size() as usize;
if size != expected {
@ -102,7 +102,7 @@ impl<T: AsRef<MapData>> StackTraceMap<T> {
/// Returns [`MapError::KeyNotFound`] if there is no stack trace with the
/// given `stack_id`, or [`MapError::SyscallError`] if `bpf_map_lookup_elem` fails.
pub fn get(&self, stack_id: &u32, flags: u64) -> Result<StackTrace, MapError> {
let fd = self.inner.as_ref().fd_or_err()?;
let fd = self.inner.borrow().fd_or_err()?;
let mut frames = vec![0; self.max_stack_depth];
bpf_map_lookup_elem_ptr(fd, Some(stack_id), frames.as_mut_ptr(), flags)
@ -136,13 +136,13 @@ impl<T: AsRef<MapData>> StackTraceMap<T> {
/// An iterator visiting all the stack_ids in arbitrary order. The iterator element
/// type is `Result<u32, MapError>`.
pub fn stack_ids(&self) -> MapKeys<'_, u32> {
MapKeys::new(self.inner.as_ref())
MapKeys::new(self.inner.borrow())
}
}
impl<T: AsRef<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> {
impl<T: Borrow<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> {
fn map(&self) -> &MapData {
self.inner.as_ref()
self.inner.borrow()
}
fn get(&self, index: &u32) -> Result<StackTrace, MapError> {
@ -150,7 +150,7 @@ impl<T: AsRef<MapData>> IterableMap<u32, StackTrace> for StackTraceMap<T> {
}
}
impl<'a, T: AsRef<MapData>> IntoIterator for &'a StackTraceMap<T> {
impl<'a, T: Borrow<MapData>> IntoIterator for &'a StackTraceMap<T> {
type Item = Result<(u32, StackTrace), MapError>;
type IntoIter = MapIter<'a, u32, StackTrace, StackTraceMap<T>>;

@ -66,7 +66,7 @@ pub(crate) fn bpf_create_map(name: &CStr, def: &obj::Map, btf_fd: Option<RawFd>)
_ => {
u.btf_key_type_id = m.def.btf_key_type_id;
u.btf_value_type_id = m.def.btf_value_type_id;
u.btf_fd = btf_fd.unwrap() as u32;
u.btf_fd = btf_fd.unwrap_or_default() as u32;
}
}
}
@ -599,6 +599,37 @@ pub(crate) fn is_prog_name_supported() -> bool {
}
}
/// Probe whether the running kernel supports the `bpf_probe_read_kernel`
/// helper (helper id 113) by attempting to load a minimal tracepoint
/// program that calls it, then closing the program fd immediately.
pub(crate) fn is_probe_read_kernel_supported() -> bool {
    let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
    let u = unsafe { &mut attr.__bindgen_anon_3 };

    // Handwritten eBPF bytecode: point r1 at 8 bytes of stack scratch
    // space, call helper 113, and exit.
    let bytecode: &[u8] = &[
        0xbf, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r1 = r10
        0x07, 0x01, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, // r1 -= 8
        0xb7, 0x02, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // r2 = 8
        0xb7, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // r3 = 0
        0x85, 0x00, 0x00, 0x00, 0x71, 0x00, 0x00, 0x00, // call 113
        0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit
    ];

    // Byte-string literal is 'static and already NUL-terminated.
    let license = b"GPL\0";
    u.license = license.as_ptr() as u64;

    // `insns` must stay alive until after the syscall below, since the
    // kernel reads the instructions through the raw pointer.
    let insns = copy_instructions(bytecode).unwrap();
    u.insn_cnt = insns.len() as u32;
    u.insns = insns.as_ptr() as u64;
    u.prog_type = bpf_prog_type::BPF_PROG_TYPE_TRACEPOINT as u32;

    // Load success means the helper exists; close the probe fd either way.
    if let Ok(v) = sys_bpf(bpf_cmd::BPF_PROG_LOAD, &attr) {
        unsafe { close(v as RawFd) };
        true
    } else {
        false
    }
}
pub(crate) fn is_perf_link_supported() -> bool {
let mut attr = unsafe { mem::zeroed::<bpf_attr>() };
let u = unsafe { &mut attr.__bindgen_anon_3 };

@ -8,6 +8,8 @@ mod fake;
use std::io;
#[cfg(not(test))]
use std::{ffi::CString, mem};
#[cfg(not(test))]
use std::{fs::File, io::Read};
#[cfg(not(test))]
use libc::utsname;
@ -82,8 +84,40 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
Ok((0xff, 0xff, 0xff))
}
#[cfg(not(test))]
/// Read the upstream kernel version on Ubuntu from `/proc/version_signature`.
///
/// Ubuntu kernels report a downstream version via `uname`, so the real
/// upstream version must be parsed from this file. Returns `Err(())` if the
/// file is missing or cannot be parsed as `"%*s %*s major.minor.patch"`.
fn ubuntu_kernel_version() -> Result<(u32, u32, u32), ()> {
    if let Ok(mut file) = File::open("/proc/version_signature") {
        let mut buf = String::new();
        let mut major = 0u32;
        let mut minor = 0u32;
        let mut patch = 0u32;
        let format = CString::new("%*s %*s %u.%u.%u\n").unwrap();
        file.read_to_string(&mut buf).map_err(|_| ())?;
        // BUGFIX: sscanf requires a NUL-terminated C string, but a Rust
        // `String`'s buffer is not NUL-terminated; passing `buf.as_ptr()`
        // directly risked reading past the end of the allocation. Convert
        // through CString (fails on interior NULs, which is also an error).
        let buf = CString::new(buf).map_err(|_| ())?;
        unsafe {
            if libc::sscanf(
                buf.as_ptr(),
                format.as_ptr(),
                &mut major as *mut u32,
                &mut minor as *mut _,
                &mut patch as *mut _,
            ) == 3
            {
                return Ok((major, minor, patch));
            }
        }
    }
    Err(())
}
#[cfg(not(test))]
pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
if let Ok(version) = ubuntu_kernel_version() {
return Ok(version);
}
unsafe {
let mut v = mem::zeroed::<utsname>();
if libc::uname(&mut v as *mut _) != 0 {
@ -93,6 +127,33 @@ pub(crate) fn kernel_version() -> Result<(u32, u32, u32), ()> {
let mut major = 0u32;
let mut minor = 0u32;
let mut patch = 0u32;
let debian_marker = CString::new("Debian").unwrap();
let p = libc::strstr(v.version.as_ptr(), debian_marker.as_ptr());
if !p.is_null() {
let debian_format = CString::new("Debian %u.%u.%u").map_err(|_| ())?;
if libc::sscanf(
p,
debian_format.as_ptr(),
&mut major as *mut u32,
&mut minor as *mut _,
&mut patch as *mut _,
) == 3
{
// On Debian 10, kernels after 4.19.229 expect 4.19.255 due to broken Makefile patches.
let patch_level_limit = if major == 4 && minor == 19 { 230 } else { 255 };
if patch >= patch_level_limit {
patch = 255;
}
return Ok((major, minor, patch));
}
}
let format = CString::new("%u.%u.%u").unwrap();
if libc::sscanf(
v.release.as_ptr(),

@ -6,8 +6,8 @@ use core::{
use aya_bpf_bindings::helpers::{
bpf_clone_redirect, bpf_get_socket_uid, bpf_l3_csum_replace, bpf_l4_csum_replace,
bpf_skb_adjust_room, bpf_skb_change_type, bpf_skb_load_bytes, bpf_skb_pull_data,
bpf_skb_store_bytes,
bpf_skb_adjust_room, bpf_skb_change_proto, bpf_skb_change_type, bpf_skb_load_bytes,
bpf_skb_pull_data, bpf_skb_store_bytes,
};
use aya_bpf_cty::c_long;
@ -189,6 +189,16 @@ impl SkBuff {
}
}
#[inline]
pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_proto(self.as_ptr() as *mut _, proto, flags) };
if ret == 0 {
Ok(())
} else {
Err(ret)
}
}
#[inline]
pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
let ret = unsafe { bpf_skb_change_type(self.as_ptr() as *mut _, ty) };

@ -142,6 +142,11 @@ impl TcContext {
self.skb.clone_redirect(if_index, flags)
}
#[inline]
pub fn change_proto(&self, proto: u16, flags: u64) -> Result<(), c_long> {
self.skb.change_proto(proto, flags)
}
#[inline]
pub fn change_type(&self, ty: u32) -> Result<(), c_long> {
self.skb.change_type(ty)

@ -22,3 +22,7 @@ path = "src/pass.rs"
[[bin]]
name = "test"
path = "src/test.rs"
[[bin]]
name = "relocations"
path = "src/relocations.rs"

@ -0,0 +1,28 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(max_entries, 2);
} RESULTS SEC(".maps");
static __u64
inc_cb(void *map, __u32 *key, void *val,
void *data)
{
__u64 *value = val;
*value += 1;
return 0;
}
SEC("uprobe/test_text_64_64_reloc")
int test_text_64_64_reloc(struct pt_regs *ctx)
{
bpf_for_each_map_elem(&RESULTS, inc_cb, NULL, 0);
return 0;
}

@ -0,0 +1,45 @@
#![no_std]
#![no_main]
use core::hint;
use aya_bpf::{
macros::{map, uprobe},
maps::Array,
programs::ProbeContext,
};
#[map]
static mut RESULTS: Array<u64> = Array::with_max_entries(3, 0);
#[uprobe]
pub fn test_64_32_call_relocs(_ctx: ProbeContext) {
// this will link set_result and do a forward call
set_result(0, hint::black_box(1));
// set_result is already linked, this will just do the forward call
set_result(1, hint::black_box(2));
// this will link set_result_backward after set_result. Then will do a
// backward call to set_result.
set_result_backward(2, hint::black_box(3));
}
#[inline(never)]
fn set_result(index: u32, value: u64) {
unsafe {
if let Some(v) = RESULTS.get_ptr_mut(index) {
*v = value;
}
}
}
#[inline(never)]
fn set_result_backward(index: u32, value: u64) {
set_result(index, value);
}
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo) -> ! {
unsafe { core::hint::unreachable_unchecked() }
}

@ -10,7 +10,7 @@ pub fn integration_test(_attr: TokenStream, item: TokenStream) -> TokenStream {
let expanded = quote! {
#item
inventory::submit!(IntegrationTest {
inventory::submit!(crate::IntegrationTest {
name: concat!(module_path!(), "::", #name_str),
test_fn: #name,
});

@ -0,0 +1,313 @@
use anyhow::{Context, Result};
use std::{path::PathBuf, process::Command, thread::sleep, time::Duration};
use tempfile::TempDir;
use aya::{maps::Array, programs::TracePoint, BpfLoader, Btf, Endianness};
use super::integration_test;
// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no
// special meaning, they just have "nice" bit patterns that can be helpful while debugging.
#[integration_test]
fn relocate_field() {
let test = RelocationTest {
local_definition: r#"
struct foo {
__u8 a;
__u8 b;
__u8 c;
__u8 d;
};
"#,
target_btf: r#"
struct foo {
__u8 a;
__u8 c;
__u8 b;
__u8 d;
} s1;
"#,
relocation_code: r#"
__u8 memory[] = {1, 2, 3, 4};
struct foo *ptr = (struct foo *) &memory;
value = __builtin_preserve_access_index(ptr->c);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 2);
assert_eq!(test.run_no_btf().unwrap(), 3);
}
#[integration_test]
fn relocate_enum() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAA };
"#,
target_btf: r#"
enum foo { D = 0xBBBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xBBBBBBBB);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA);
}
#[integration_test]
fn relocate_enum_signed() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0x7AAAAAAA };
"#,
target_btf: r#"
enum foo { D = -0x7BBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64);
assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64);
}
#[integration_test]
fn relocate_enum64() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = 0xCCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB);
}
#[integration_test]
fn relocate_enum64_signed() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0xAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = -0xCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64);
assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64);
}
#[integration_test]
fn relocate_pointer() {
let test = RelocationTest {
local_definition: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
target_btf: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
relocation_code: r#"
__u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0};
struct bar* ptr = (struct bar *) &memory;
value = (__u64) __builtin_preserve_access_index(ptr->f);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 42);
assert_eq!(test.run_no_btf().unwrap(), 42);
}
/// Utility code for running relocation tests:
/// - Generates the eBPF program using provided local definition and relocation code
/// - Generates the BTF from the target btf code
struct RelocationTest {
    /// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode
    local_definition: &'static str,
    /// Target data structure definition. What the vmlinux would actually contain.
    target_btf: &'static str,
    /// Code executed by the eBPF program to test the relocation.
    /// The format should be:
    // __u8 memory[] = { ... };
    // __u32 value = BPF_CORE_READ((struct foo *)&memory, ...);
    //
    // The generated code will be executed by attaching a tracepoint to sched_switch
    // and emitting `__u32 value` on a map. See the code template below for more details.
    relocation_code: &'static str,
}
impl RelocationTest {
/// Build a RelocationTestRunner
fn build(&self) -> Result<RelocationTestRunner> {
Ok(RelocationTestRunner {
ebpf: self.build_ebpf()?,
btf: self.build_btf()?,
})
}
/// - Generate the source eBPF filling a template
/// - Compile it with clang
fn build_ebpf(&self) -> Result<Vec<u8>> {
let local_definition = self.local_definition;
let relocation_code = self.relocation_code;
let (_tmp_dir, compiled_file) = compile(&format!(
r#"
#include <linux/bpf.h>
static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2;
{local_definition}
struct {{
int (*type)[BPF_MAP_TYPE_ARRAY];
__u32 *key;
__u64 *value;
int (*max_entries)[1];
}} output_map
__attribute__((section(".maps"), used));
__attribute__((section("tracepoint/bpf_prog"), used))
int bpf_prog(void *ctx) {{
__u32 key = 0;
__u64 value = 0;
{relocation_code}
bpf_map_update_elem(&output_map, &key, &value, BPF_ANY);
return 0;
}}
char _license[] __attribute__((section("license"), used)) = "GPL";
"#
))
.context("Failed to compile eBPF program")?;
let bytecode =
std::fs::read(compiled_file).context("Error reading compiled eBPF program")?;
Ok(bytecode)
}
/// - Generate the target BTF source with a mock main()
/// - Compile it with clang
/// - Extract the BTF with llvm-objcopy
fn build_btf(&self) -> Result<Btf> {
let target_btf = self.target_btf;
let relocation_code = self.relocation_code;
// BTF files can be generated and inspected with these commands:
// $ clang -c -g -O2 -target bpf target.c
// $ pahole --btf_encode_detached=target.btf -V target.o
// $ bpftool btf dump file ./target.btf format c
let (tmp_dir, compiled_file) = compile(&format!(
r#"
#include <linux/bpf.h>
{target_btf}
int main() {{
__u64 value = 0;
// This is needed to make sure to emit BTF for the defined types,
// it could be dead code eliminated if we don't.
{relocation_code};
return value;
}}
"#
))
.context("Failed to compile BTF")?;
Command::new("llvm-objcopy")
.current_dir(tmp_dir.path())
.args(["--dump-section", ".BTF=target.btf"])
.arg(compiled_file)
.status()
.context("Failed to run llvm-objcopy")?
.success()
.then_some(())
.context("Failed to extract BTF")?;
let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default())
.context("Error parsing generated BTF")?;
Ok(btf)
}
}
/// Compile an eBPF program and return the path of the compiled object.
/// Also returns a TempDir handle; dropping it will clear the created directory.
fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> {
    let tmp_dir = tempfile::tempdir().context("Error making temp dir")?;
    let source = tmp_dir.path().join("source.c");
    std::fs::write(&source, source_code).context("Writing bpf program failed")?;
    // -g makes clang emit debug info/BTF (see build_btf); -target bpf
    // selects the eBPF backend.
    Command::new("clang")
        .current_dir(&tmp_dir)
        .args(["-c", "-g", "-O2", "-target", "bpf"])
        .arg(&source)
        .status()
        .context("Failed to run clang")?
        .success()
        // A non-zero clang exit maps to None, turned into an error here.
        .then_some(())
        .context("Failed to compile eBPF source")?;
    // clang names the object after the input file with a .o extension.
    Ok((tmp_dir, source.with_extension("o")))
}
/// Compiled artifacts for one relocation test: the eBPF object bytes and
/// the detached target BTF to relocate against.
struct RelocationTestRunner {
    // Output of RelocationTest::build_ebpf().
    ebpf: Vec<u8>,
    // Output of RelocationTest::build_btf().
    btf: Btf,
}
impl RelocationTestRunner {
    /// Run test and return the output value
    fn run(&self) -> Result<u64> {
        self.run_internal(true).context("Error running with BTF")
    }

    /// Run without loading btf
    fn run_no_btf(&self) -> Result<u64> {
        self.run_internal(false)
            .context("Error running without BTF")
    }

    /// Load the program (with or without the target BTF), attach it to the
    /// sched/sched_switch tracepoint, wait for it to fire, then read the
    /// value the program wrote to key 0 of "output_map".
    fn run_internal(&self, with_relocations: bool) -> Result<u64> {
        let mut loader = BpfLoader::new();
        if with_relocations {
            loader.btf(Some(&self.btf));
        } else {
            loader.btf(None);
        }
        let mut bpf = loader.load(&self.ebpf).context("Loading eBPF failed")?;
        let program: &mut TracePoint = bpf
            .program_mut("bpf_prog")
            .context("bpf_prog not found")?
            .try_into()
            .context("program not a tracepoint")?;
        program.load().context("Loading tracepoint failed")?;
        // Attach to sched_switch and wait some time to make sure it executed at least once
        program
            .attach("sched", "sched_switch")
            .context("attach failed")?;
        sleep(Duration::from_millis(1000));
        // To inspect the loaded eBPF bytecode, increase the timeout and run:
        // $ sudo bpftool prog dump xlated name bpf_prog
        let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap();
        let key = 0;
        output_map.get(&key, 0).context("Getting key 0 failed")
    }
}

@ -1,11 +1,11 @@
use super::{integration_test, IntegrationTest};
use super::integration_test;
use aya::include_bytes_aligned;
use object::{Object, ObjectSymbol};
#[integration_test]
fn test_maps() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/map_test");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/map_test");
let obj_file = object::File::parse(bytes).unwrap();
if obj_file.section_by_name("maps").is_none() {
panic!("No 'maps' ELF section");

@ -13,14 +13,14 @@ use log::warn;
use crate::tests::kernel_version;
use super::{integration_test, IntegrationTest};
use super::integration_test;
const MAX_RETRIES: u32 = 100;
const RETRY_DURATION_MS: u64 = 10;
#[integration_test]
fn long_name() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/name_test");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/name_test");
let mut bpf = Bpf::load(bytes).unwrap();
let name_prog: &mut Xdp = bpf
.program_mut("ihaveaverylongname")
@ -38,7 +38,7 @@ fn long_name() {
#[integration_test]
fn multiple_btf_maps() {
let bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap-btf.bpf.o");
include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o");
let mut bpf = Bpf::load(bytes).unwrap();
let map_1: Array<_, u64> = bpf.take_map("map_1").unwrap().try_into().unwrap();
@ -85,7 +85,7 @@ macro_rules! assert_loaded {
#[integration_test]
fn unload_xdp() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test");
let mut bpf = Bpf::load(bytes).unwrap();
let prog: &mut Xdp = bpf
.program_mut("test_unload_xdp")
@ -115,7 +115,7 @@ fn unload_xdp() {
#[integration_test]
fn unload_kprobe() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test");
let mut bpf = Bpf::load(bytes).unwrap();
let prog: &mut KProbe = bpf
.program_mut("test_unload_kpr")
@ -150,7 +150,7 @@ fn pin_link() {
return;
}
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/test");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/test");
let mut bpf = Bpf::load(bytes).unwrap();
let prog: &mut Xdp = bpf
.program_mut("test_unload_xdp")
@ -185,7 +185,7 @@ fn pin_lifecycle() {
return;
}
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass");
// 1. Load Program and Pin
{

@ -4,6 +4,7 @@ use libc::{uname, utsname};
use regex::Regex;
use std::{ffi::CStr, mem};
pub mod btf_relocations;
pub mod elf;
pub mod load;
pub mod rbpf;

@ -4,11 +4,11 @@ use std::collections::HashMap;
use aya::include_bytes_aligned;
use aya_obj::{generated::bpf_insn, Object, ProgramSection};
use super::{integration_test, IntegrationTest};
use super::integration_test;
#[integration_test]
fn run_with_rbpf() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass");
let object = Object::parse(bytes).unwrap();
assert_eq!(object.programs.len(), 1);
@ -36,7 +36,7 @@ static mut MULTIMAP_MAPS: [*mut Vec<u64>; 2] = [null_mut(), null_mut()];
#[integration_test]
fn use_map_with_rbpf() {
let bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap-btf.bpf.o");
include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o");
let mut object = Object::parse(bytes).unwrap();
assert_eq!(object.programs.len(), 1);
@ -69,14 +69,20 @@ fn use_map_with_rbpf() {
}
}
let text_sections = object
.functions
.iter()
.map(|((section_index, _), _)| *section_index)
.collect();
object
.relocate_maps(
maps.iter()
.map(|(s, (fd, map))| (s.as_ref() as &str, Some(*fd), map)),
&text_sections,
)
.expect("Relocation failed");
// Actually there is no local function call involved.
object.relocate_calls().unwrap();
object.relocate_calls(&text_sections).unwrap();
// Executes the program
assert_eq!(object.programs.len(), 1);

@ -1,313 +1,70 @@
use anyhow::{Context, Result};
use std::{path::PathBuf, process::Command, thread::sleep, time::Duration};
use tempfile::TempDir;
use std::{process::exit, time::Duration};
use aya::{maps::Array, programs::TracePoint, BpfLoader, Btf, Endianness};
use super::{integration_test, IntegrationTest};
// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no
// special meaning, they just have "nice" bit patterns that can be helpful while debugging.
#[integration_test]
fn relocate_field() {
let test = RelocationTest {
local_definition: r#"
struct foo {
__u8 a;
__u8 b;
__u8 c;
__u8 d;
};
"#,
target_btf: r#"
struct foo {
__u8 a;
__u8 c;
__u8 b;
__u8 d;
} s1;
"#,
relocation_code: r#"
__u8 memory[] = {1, 2, 3, 4};
struct foo *ptr = (struct foo *) &memory;
value = __builtin_preserve_access_index(ptr->c);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 2);
assert_eq!(test.run_no_btf().unwrap(), 3);
}
#[integration_test]
fn relocate_enum() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAA };
"#,
target_btf: r#"
enum foo { D = 0xBBBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xBBBBBBBB);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA);
}
use aya::{
include_bytes_aligned,
programs::{ProgramError, UProbe},
Bpf,
};
use integration_test_macros::integration_test;
#[integration_test]
fn relocate_enum_signed() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0x7AAAAAAA };
"#,
target_btf: r#"
enum foo { D = -0x7BBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64);
assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64);
fn relocations() {
    // Load the object built from src/relocations.rs and attach its uprobe
    // (load_and_attach targets trigger_relocations_program in /proc/self/exe).
    let bpf = load_and_attach(
        "test_64_32_call_relocs",
        include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/relocations"),
    );

    trigger_relocations_program();
    // Give the probe time to fire and update the map before reading it.
    std::thread::sleep(Duration::from_millis(100));

    // The eBPF program writes 1, 2, 3 into slots 0..=2 via relocated calls.
    let m = aya::maps::Array::<_, u64>::try_from(bpf.map("RESULTS").unwrap()).unwrap();
    assert_eq!(m.get(&0, 0).unwrap(), 1);
    assert_eq!(m.get(&1, 0).unwrap(), 2);
    assert_eq!(m.get(&2, 0).unwrap(), 3);
}
#[integration_test]
fn relocate_enum64() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = 0xCCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB);
}
fn text_64_64_reloc() {
let mut bpf = load_and_attach(
"test_text_64_64_reloc",
include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/text_64_64_reloc.o"),
);
#[integration_test]
fn relocate_enum64_signed() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0xAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = -0xCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64);
assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64);
}
let mut m = aya::maps::Array::<_, u64>::try_from(bpf.map_mut("RESULTS").unwrap()).unwrap();
m.set(0, 1, 0).unwrap();
m.set(1, 2, 0).unwrap();
#[integration_test]
fn relocate_pointer() {
let test = RelocationTest {
local_definition: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
target_btf: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
relocation_code: r#"
__u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0};
struct bar* ptr = (struct bar *) &memory;
value = (__u64) __builtin_preserve_access_index(ptr->f);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 42);
assert_eq!(test.run_no_btf().unwrap(), 42);
}
trigger_relocations_program();
std::thread::sleep(Duration::from_millis(100));
/// Utility code for running relocation tests:
/// - Generates the eBPF program using probided local definition and relocation code
/// - Generates the BTF from the target btf code
struct RelocationTest {
/// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode
local_definition: &'static str,
/// Target data structure definition. What the vmlinux would actually contain.
target_btf: &'static str,
/// Code executed by the eBPF program to test the relocation.
/// The format should be:
// __u8 memory[] = { ... };
// __u32 value = BPF_CORE_READ((struct foo *)&memory, ...);
//
// The generated code will be executed by attaching a tracepoint to sched_switch
// and emitting `__u32 value` an a map. See the code template below for more details.
relocation_code: &'static str,
assert_eq!(m.get(&0, 0).unwrap(), 2);
assert_eq!(m.get(&1, 0).unwrap(), 3);
}
impl RelocationTest {
/// Build a RelocationTestRunner
fn build(&self) -> Result<RelocationTestRunner> {
Ok(RelocationTestRunner {
ebpf: self.build_ebpf()?,
btf: self.build_btf()?,
})
}
/// - Generate the source eBPF filling a template
/// - Compile it with clang
fn build_ebpf(&self) -> Result<Vec<u8>> {
let local_definition = self.local_definition;
let relocation_code = self.relocation_code;
let (_tmp_dir, compiled_file) = compile(&format!(
r#"
#include <linux/bpf.h>
static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2;
{local_definition}
struct {{
int (*type)[BPF_MAP_TYPE_ARRAY];
__u32 *key;
__u64 *value;
int (*max_entries)[1];
}} output_map
__attribute__((section(".maps"), used));
__attribute__((section("tracepoint/bpf_prog"), used))
int bpf_prog(void *ctx) {{
__u32 key = 0;
__u64 value = 0;
{relocation_code}
bpf_map_update_elem(&output_map, &key, &value, BPF_ANY);
return 0;
}}
char _license[] __attribute__((section("license"), used)) = "GPL";
"#
))
.context("Failed to compile eBPF program")?;
let bytecode =
std::fs::read(compiled_file).context("Error reading compiled eBPF program")?;
Ok(bytecode)
}
/// - Generate the target BTF source with a mock main()
/// - Compile it with clang
/// - Extract the BTF with llvm-objcopy
fn build_btf(&self) -> Result<Btf> {
let target_btf = self.target_btf;
let relocation_code = self.relocation_code;
// BTF files can be generated and inspected with these commands:
// $ clang -c -g -O2 -target bpf target.c
// $ pahole --btf_encode_detached=target.btf -V target.o
// $ bpftool btf dump file ./target.btf format c
let (tmp_dir, compiled_file) = compile(&format!(
r#"
#include <linux/bpf.h>
{target_btf}
int main() {{
__u64 value = 0;
// This is needed to make sure to emit BTF for the defined types,
// it could be dead code eliminated if we don't.
{relocation_code};
return value;
}}
"#
))
.context("Failed to compile BTF")?;
Command::new("llvm-objcopy")
.current_dir(tmp_dir.path())
.args(["--dump-section", ".BTF=target.btf"])
.arg(compiled_file)
.status()
.context("Failed to run llvm-objcopy")?
.success()
.then_some(())
.context("Failed to extract BTF")?;
let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default())
.context("Error parsing generated BTF")?;
Ok(btf)
}
}
fn load_and_attach(name: &str, bytes: &[u8]) -> Bpf {
let mut bpf = Bpf::load(bytes).unwrap();
let prog: &mut UProbe = bpf.program_mut(name).unwrap().try_into().unwrap();
if let Err(ProgramError::LoadError {
io_error,
verifier_log,
}) = prog.load()
{
println!("Failed to load program `{name}`: {io_error}. Verifier log:\n{verifier_log:#}");
exit(1);
};
/// Compile an eBPF program and return the path of the compiled object.
/// Also returns a TempDir handler, dropping it will clear the created dicretory.
fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> {
let tmp_dir = tempfile::tempdir().context("Error making temp dir")?;
let source = tmp_dir.path().join("source.c");
std::fs::write(&source, source_code).context("Writing bpf program failed")?;
Command::new("clang")
.current_dir(&tmp_dir)
.args(["-c", "-g", "-O2", "-target", "bpf"])
.arg(&source)
.status()
.context("Failed to run clang")?
.success()
.then_some(())
.context("Failed to compile eBPF source")?;
Ok((tmp_dir, source.with_extension("o")))
}
prog.attach(
Some("trigger_relocations_program"),
0,
"/proc/self/exe",
None,
)
.unwrap();
struct RelocationTestRunner {
ebpf: Vec<u8>,
btf: Btf,
bpf
}
impl RelocationTestRunner {
    /// Run the test with BTF relocations and return the output value.
    fn run(&self) -> Result<u64> {
        self.run_internal(true).context("Error running with BTF")
    }

    /// Run the test without loading BTF and return the output value.
    fn run_no_btf(&self) -> Result<u64> {
        self.run_internal(false)
            .context("Error running without BTF")
    }

    /// Load `self.ebpf` (optionally with `self.btf`), attach its tracepoint,
    /// let it run, then read back key 0 of "output_map".
    fn run_internal(&self, with_relocations: bool) -> Result<u64> {
        let mut loader = BpfLoader::new();
        let btf = if with_relocations {
            Some(&self.btf)
        } else {
            None
        };
        loader.btf(btf);
        let mut loaded = loader.load(&self.ebpf).context("Loading eBPF failed")?;
        let tracepoint: &mut TracePoint = loaded
            .program_mut("bpf_prog")
            .context("bpf_prog not found")?
            .try_into()
            .context("program not a tracepoint")?;
        tracepoint.load().context("Loading tracepoint failed")?;
        // Attach to sched_switch and wait some time to make sure it executed at least once
        tracepoint
            .attach("sched", "sched_switch")
            .context("attach failed")?;
        sleep(Duration::from_millis(1000));
        // To inspect the loaded eBPF bytecode, increase the timeout and run:
        // $ sudo bpftool prog dump xlated name bpf_prog
        let output_map: Array<_, u64> = loaded.take_map("output_map").unwrap().try_into().unwrap();
        output_map.get(&0, 0).context("Getting key 0 failed")
    }
}
// Uprobe target: `load_and_attach` attaches the program under test to this
// exported symbol in /proc/self/exe. `no_mangle` keeps the symbol name stable
// and `inline(never)` guarantees the symbol actually exists in the binary.
#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_relocations_program() {}

@ -5,11 +5,11 @@ use aya::{
};
use log::info;
use super::{integration_test, kernel_version, IntegrationTest};
use super::{integration_test, kernel_version};
#[integration_test]
fn xdp() {
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/pass");
let mut bpf = Bpf::load(bytes).unwrap();
let dispatcher: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
dispatcher.load().unwrap();
@ -28,13 +28,14 @@ fn extension() {
}
// TODO: Check kernel version == 5.9 or later
let main_bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/main.bpf.o");
include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/main.bpf.o");
let mut bpf = Bpf::load(main_bytes).unwrap();
let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
pass.load().unwrap();
pass.attach("lo", XdpFlags::default()).unwrap();
let ext_bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/ext.bpf.o");
let ext_bytes =
include_bytes_aligned!("../../../../target/bpfel-unknown-none/release/ext.bpf.o");
let mut bpf = BpfLoader::new().extension("drop").load(ext_bytes).unwrap();
let drop_: &mut Extension = bpf.program_mut("drop").unwrap().try_into().unwrap();
drop_.load(pass.fd().unwrap(), "xdp_pass").unwrap();

@ -40,7 +40,7 @@ fi
# Test Image
if [ -z "${AYA_TEST_IMAGE}" ]; then
AYA_TEST_IMAGE="fedora37"
AYA_TEST_IMAGE="fedora38"
fi
case "${AYA_TEST_IMAGE}" in
@ -59,6 +59,14 @@ download_images() {
curl -o "${AYA_IMGDIR}/fedora37.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}"
fi
;;
fedora38)
if [ ! -f "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" ]; then
IMAGE="Fedora-Cloud-Base-38_Beta-1.3.${AYA_GUEST_ARCH}.qcow2"
IMAGE_URL="https://fr2.rpmfind.net/linux/fedora/linux/releases/test/38_Beta/Cloud/${AYA_GUEST_ARCH}/images"
echo "Downloading: ${IMAGE}, this may take a while..."
curl -o "${AYA_IMGDIR}/fedora38.${AYA_GUEST_ARCH}.qcow2" -sSL "${IMAGE_URL}/${IMAGE}"
fi
;;
centos8)
if [ ! -f "${AYA_IMGDIR}/centos8.${AYA_GUEST_ARCH}.qcow2" ]; then
IMAGE="CentOS-8-GenericCloud-8.4.2105-20210603.0.${AYA_GUEST_ARCH}.qcow2"
@ -181,6 +189,9 @@ EOF
echo "VM launched"
exec_vm uname -a
echo "Enabling testing repositories"
exec_vm sudo dnf config-manager --set-enabled updates-testing
exec_vm sudo dnf config-manager --set-enabled updates-testing-modular
echo "Installing dependencies"
exec_vm sudo dnf install -qy bpftool llvm llvm-devel clang clang-devel zlib-devel
exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \

@ -41,9 +41,6 @@ pub struct BuildEbpfOptions {
/// Set the endianness of the BPF target
#[clap(default_value = "bpfel-unknown-none", long)]
pub target: Architecture,
/// Build the release target
#[clap(long)]
pub release: bool,
/// Libbpf dir, required for compiling C code
#[clap(long, action)]
pub libbpf_dir: PathBuf,
@ -59,17 +56,15 @@ fn build_rust_ebpf(opts: &BuildEbpfOptions) -> anyhow::Result<()> {
dir.push("test/integration-ebpf");
let target = format!("--target={}", opts.target);
let mut args = vec![
let args = vec![
"+nightly",
"build",
"--release",
"--verbose",
target.as_str(),
"-Z",
"build-std=core",
];
if opts.release {
args.push("--release")
}
let status = Command::new("cargo")
.current_dir(&dir)
.args(&args)
@ -99,7 +94,7 @@ fn build_c_ebpf(opts: &BuildEbpfOptions) -> anyhow::Result<()> {
let mut out_path = PathBuf::from(WORKSPACE_ROOT.to_string());
out_path.push("target");
out_path.push(opts.target.to_string());
out_path.push(if opts.release { "release " } else { "debug" });
out_path.push("release");
let include_path = out_path.join("include");
get_libbpf_headers(&opts.libbpf_dir, &include_path)?;

@ -45,7 +45,6 @@ pub fn run(opts: Options) -> Result<(), anyhow::Error> {
// build our ebpf program followed by our application
build_ebpf(BuildOptions {
target: opts.bpf_target,
release: opts.release,
libbpf_dir: PathBuf::from(&opts.libbpf_dir),
})
.context("Error while building eBPF program")?;

Loading…
Cancel
Save