mirror of https://github.com/aya-rs/aya

Merge branch 'aya-rs:main' into interface
commit c7a997b48e
@@ -0,0 +1,43 @@
name: integration-tests

on:
  push:
    branches:
      - main

  pull_request:
    branches:
      - main

env:
  CARGO_TERM_COLOR: always

jobs:
  test:
    runs-on: macos-latest

    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v2
        with:
          repository: libbpf/libbpf
          path: libbpf

      - name: Install Pre-requisites
        run: |
          brew install qemu gnu-getopt coreutils cdrtools

      - name: Cache tmp files
        uses: actions/cache@v3
        with:
          path: |
            .tmp/*.qcow2
            .tmp/test_rsa
            .tmp/test_rsa.pub
          # FIXME: we should invalidate the cache on new bpf-linker releases.
          # For now we must manually delete the cache when we release a new
          # bpf-linker version.
          key: tmp-files-${{ hashFiles('test/run.sh') }}

      - name: Run integration tests
        run: test/run.sh ./libbpf
@@ -0,0 +1,27 @@
[package]
name = "aya-obj"
version = "0.1.0"
description = "An eBPF object file parsing library with BTF and relocation support."
keywords = ["ebpf", "bpf", "btf", "elf", "object"]
license = "MIT OR Apache-2.0"
authors = ["The Aya Contributors"]
repository = "https://github.com/aya-rs/aya"
readme = "README.md"
documentation = "https://docs.rs/aya-obj"
edition = "2021"

[dependencies]
bytes = "1"
log = "0.4"
object = { version = "0.30", default-features = false, features = ["read_core", "elf"] }
hashbrown = { version = "0.13", optional = true }
thiserror-std = { package = "thiserror", version = "1" }
thiserror-core = { version = "1", default-features = false, features = [], optional = true }

[dev-dependencies]
matches = "0.1.8"
rbpf = "0.1.0"

[features]
default = []
no_std = ["hashbrown", "thiserror-core"]
@@ -0,0 +1,55 @@
# aya-obj

## Status

This crate includes code that started as internal API used by
the [aya] crate. It has been split out so that it can be used by
other projects that deal with eBPF object files. Unless you're writing
low-level eBPF plumbing tools, you should not need to use this crate;
use the [aya] crate instead.

The API as it is today has a few rough edges and is generally not as
polished or stable as the main [aya] crate API. As always,
improvements welcome!

[aya]: https://github.com/aya-rs/aya

## Overview

eBPF programs written with [libbpf] or [aya-bpf] are usually compiled
into an ELF object file, using various sections to store information
about the eBPF programs.

`aya-obj` is a library for parsing such eBPF object files, with BTF and
relocation support.

[libbpf]: https://github.com/libbpf/libbpf
[aya-bpf]: https://github.com/aya-rs/aya

## Example

This example loads a simple eBPF program and runs it with [rbpf].

```rust
use aya_obj::{generated::bpf_insn, Object};

// Parse the object file
let bytes = std::fs::read("program.o").unwrap();
let mut object = Object::parse(&bytes).unwrap();
// Relocate the programs
object.relocate_calls().unwrap();
object.relocate_maps(std::iter::empty()).unwrap();

// Run with rbpf
let instructions = &object.programs["prog_name"].function.instructions;
let data = unsafe {
    core::slice::from_raw_parts(
        instructions.as_ptr() as *const u8,
        instructions.len() * core::mem::size_of::<bpf_insn>(),
    )
};
let vm = rbpf::EbpfVmNoData::new(Some(data)).unwrap();
let _return = vm.execute_program().unwrap();
```

[rbpf]: https://github.com/qmonnet/rbpf
@@ -0,0 +1,12 @@
//! BTF loading, parsing and relocation.

#[allow(clippy::module_inception)]
mod btf;
mod info;
mod relocation;
mod types;

pub use btf::*;
pub use info::*;
pub use relocation::BtfRelocationError;
pub use types::*;
@@ -1,10 +1,10 @@
/* automatically generated by rust-bindgen 0.60.1 */
/* automatically generated by rust-bindgen 0.63.0 */

pub type __u8 = ::std::os::raw::c_uchar;
pub type __u16 = ::std::os::raw::c_ushort;
pub type __u32 = ::std::os::raw::c_uint;
pub type __u8 = ::core::ffi::c_uchar;
pub type __u16 = ::core::ffi::c_ushort;
pub type __u32 = ::core::ffi::c_uint;
pub mod bpf_core_relo_kind {
    pub type Type = ::std::os::raw::c_uint;
    pub type Type = ::core::ffi::c_uint;
    pub const BPF_CORE_FIELD_BYTE_OFFSET: Type = 0;
    pub const BPF_CORE_FIELD_BYTE_SIZE: Type = 1;
    pub const BPF_CORE_FIELD_EXISTS: Type = 2;
File diff suppressed because it is too large (4 files)
@@ -1,3 +1,5 @@
//! eBPF bindings generated by rust-bindgen

#![allow(
    dead_code,
    non_camel_case_types,
@@ -0,0 +1,85 @@
//! An eBPF object file parsing library with BTF and relocation support.
//!
//! # Status
//!
//! This crate includes code that started as internal API used by
//! the [aya] crate. It has been split out so that it can be used by
//! other projects that deal with eBPF object files. Unless you're writing
//! low-level eBPF plumbing tools, you should not need to use this crate;
//! use the [aya] crate instead.
//!
//! The API as it is today has a few rough edges and is generally not as
//! polished or stable as the main [aya] crate API. As always,
//! improvements welcome!
//!
//! [aya]: https://github.com/aya-rs/aya
//!
//! # Overview
//!
//! eBPF programs written with [libbpf] or [aya-bpf] are usually compiled
//! into an ELF object file, using various sections to store information
//! about the eBPF programs.
//!
//! `aya-obj` is a library for parsing such eBPF object files, with BTF and
//! relocation support.
//!
//! [libbpf]: https://github.com/libbpf/libbpf
//! [aya-bpf]: https://github.com/aya-rs/aya
//!
//! # Example
//!
//! This example loads a simple eBPF program and runs it with [rbpf].
//!
//! ```no_run
//! use aya_obj::{generated::bpf_insn, Object};
//!
//! // Parse the object file
//! let bytes = std::fs::read("program.o").unwrap();
//! let mut object = Object::parse(&bytes).unwrap();
//! // Relocate the programs
//! object.relocate_calls().unwrap();
//! object.relocate_maps(std::iter::empty()).unwrap();
//!
//! // Run with rbpf
//! let instructions = &object.programs["prog_name"].function.instructions;
//! let data = unsafe {
//!     core::slice::from_raw_parts(
//!         instructions.as_ptr() as *const u8,
//!         instructions.len() * core::mem::size_of::<bpf_insn>(),
//!     )
//! };
//! let vm = rbpf::EbpfVmNoData::new(Some(data)).unwrap();
//! let _return = vm.execute_program().unwrap();
//! ```
//!
//! [rbpf]: https://github.com/qmonnet/rbpf

#![no_std]
#![doc(
    html_logo_url = "https://aya-rs.dev/assets/images/crabby.svg",
    html_favicon_url = "https://aya-rs.dev/assets/images/crabby.svg"
)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![deny(clippy::all, missing_docs)]
#![allow(clippy::missing_safety_doc, clippy::len_without_is_empty)]
#![cfg_attr(feature = "no_std", feature(error_in_core))]

#[cfg(feature = "no_std")]
pub(crate) use thiserror_core as thiserror;
#[cfg(not(feature = "no_std"))]
pub(crate) use thiserror_std as thiserror;

extern crate alloc;
#[cfg(not(feature = "no_std"))]
extern crate std;

pub mod btf;
pub mod generated;
pub mod maps;
pub mod obj;
pub mod programs;
pub mod relocation;
mod util;

pub use maps::Map;
pub use obj::*;
@@ -0,0 +1,303 @@
//! Map struct and type bindings.

use core::mem;

use crate::thiserror::{self, Error};
use alloc::vec::Vec;

/// Invalid map type encountered
pub struct InvalidMapTypeError {
    /// The map type
    pub map_type: u32,
}

impl TryFrom<u32> for crate::generated::bpf_map_type {
    type Error = InvalidMapTypeError;

    fn try_from(map_type: u32) -> Result<Self, Self::Error> {
        use crate::generated::bpf_map_type::*;
        Ok(match map_type {
            x if x == BPF_MAP_TYPE_UNSPEC as u32 => BPF_MAP_TYPE_UNSPEC,
            x if x == BPF_MAP_TYPE_HASH as u32 => BPF_MAP_TYPE_HASH,
            x if x == BPF_MAP_TYPE_ARRAY as u32 => BPF_MAP_TYPE_ARRAY,
            x if x == BPF_MAP_TYPE_PROG_ARRAY as u32 => BPF_MAP_TYPE_PROG_ARRAY,
            x if x == BPF_MAP_TYPE_PERF_EVENT_ARRAY as u32 => BPF_MAP_TYPE_PERF_EVENT_ARRAY,
            x if x == BPF_MAP_TYPE_PERCPU_HASH as u32 => BPF_MAP_TYPE_PERCPU_HASH,
            x if x == BPF_MAP_TYPE_PERCPU_ARRAY as u32 => BPF_MAP_TYPE_PERCPU_ARRAY,
            x if x == BPF_MAP_TYPE_STACK_TRACE as u32 => BPF_MAP_TYPE_STACK_TRACE,
            x if x == BPF_MAP_TYPE_CGROUP_ARRAY as u32 => BPF_MAP_TYPE_CGROUP_ARRAY,
            x if x == BPF_MAP_TYPE_LRU_HASH as u32 => BPF_MAP_TYPE_LRU_HASH,
            x if x == BPF_MAP_TYPE_LRU_PERCPU_HASH as u32 => BPF_MAP_TYPE_LRU_PERCPU_HASH,
            x if x == BPF_MAP_TYPE_LPM_TRIE as u32 => BPF_MAP_TYPE_LPM_TRIE,
            x if x == BPF_MAP_TYPE_BLOOM_FILTER as u32 => BPF_MAP_TYPE_BLOOM_FILTER,
            x if x == BPF_MAP_TYPE_ARRAY_OF_MAPS as u32 => BPF_MAP_TYPE_ARRAY_OF_MAPS,
            x if x == BPF_MAP_TYPE_HASH_OF_MAPS as u32 => BPF_MAP_TYPE_HASH_OF_MAPS,
            x if x == BPF_MAP_TYPE_DEVMAP as u32 => BPF_MAP_TYPE_DEVMAP,
            x if x == BPF_MAP_TYPE_SOCKMAP as u32 => BPF_MAP_TYPE_SOCKMAP,
            x if x == BPF_MAP_TYPE_CPUMAP as u32 => BPF_MAP_TYPE_CPUMAP,
            x if x == BPF_MAP_TYPE_XSKMAP as u32 => BPF_MAP_TYPE_XSKMAP,
            x if x == BPF_MAP_TYPE_SOCKHASH as u32 => BPF_MAP_TYPE_SOCKHASH,
            x if x == BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED as u32 => {
                BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED
            }
            x if x == BPF_MAP_TYPE_CGRP_STORAGE as u32 => BPF_MAP_TYPE_CGRP_STORAGE,
            x if x == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY as u32 => BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
            x if x == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE as u32 => {
                BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
            }
            x if x == BPF_MAP_TYPE_QUEUE as u32 => BPF_MAP_TYPE_QUEUE,
            x if x == BPF_MAP_TYPE_STACK as u32 => BPF_MAP_TYPE_STACK,
            x if x == BPF_MAP_TYPE_SK_STORAGE as u32 => BPF_MAP_TYPE_SK_STORAGE,
            x if x == BPF_MAP_TYPE_DEVMAP_HASH as u32 => BPF_MAP_TYPE_DEVMAP_HASH,
            x if x == BPF_MAP_TYPE_STRUCT_OPS as u32 => BPF_MAP_TYPE_STRUCT_OPS,
            x if x == BPF_MAP_TYPE_RINGBUF as u32 => BPF_MAP_TYPE_RINGBUF,
            x if x == BPF_MAP_TYPE_INODE_STORAGE as u32 => BPF_MAP_TYPE_INODE_STORAGE,
            x if x == BPF_MAP_TYPE_TASK_STORAGE as u32 => BPF_MAP_TYPE_TASK_STORAGE,
            x if x == BPF_MAP_TYPE_BLOOM_FILTER as u32 => BPF_MAP_TYPE_BLOOM_FILTER,
            x if x == BPF_MAP_TYPE_USER_RINGBUF as u32 => BPF_MAP_TYPE_USER_RINGBUF,
            x if x == BPF_MAP_TYPE_CGRP_STORAGE as u32 => BPF_MAP_TYPE_CGRP_STORAGE,
            _ => return Err(InvalidMapTypeError { map_type }),
        })
    }
}

/// BTF definition of a map
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct BtfMapDef {
    pub(crate) map_type: u32,
    pub(crate) key_size: u32,
    pub(crate) value_size: u32,
    pub(crate) max_entries: u32,
    pub(crate) map_flags: u32,
    pub(crate) pinning: PinningType,
    /// BTF type id of the map key
    pub btf_key_type_id: u32,
    /// BTF type id of the map value
    pub btf_value_type_id: u32,
}

/// The pinning type
///
/// Upon pinning a map, a file representation is created for the map,
/// so that the map can be alive and retrievable across sessions.
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
pub enum PinningType {
    /// No pinning
    #[default]
    None = 0,
    /// Pin by name
    ByName = 1,
}

/// The error type returned when failing to parse a [PinningType]
#[derive(Debug, Error)]
pub enum PinningError {
    /// Unsupported pinning type
    #[error("unsupported pinning type `{pinning_type}`")]
    Unsupported {
        /// The unsupported pinning type
        pinning_type: u32,
    },
}

impl TryFrom<u32> for PinningType {
    type Error = PinningError;

    fn try_from(value: u32) -> Result<Self, Self::Error> {
        match value {
            0 => Ok(PinningType::None),
            1 => Ok(PinningType::ByName),
            pinning_type => Err(PinningError::Unsupported { pinning_type }),
        }
    }
}
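For illustration, a minimal sketch of how a caller can consume the conversion above; the `raw_pinning` value is a hypothetical stand-in for a pinning field read out of a `.maps` BTF definition:

```rust
use aya_obj::maps::PinningType;

// Hypothetical raw pinning value read from a BTF map definition.
let raw_pinning: u32 = 1;

// 0 maps to PinningType::None, 1 to PinningType::ByName,
// anything else is rejected with PinningError::Unsupported.
let pinning = PinningType::try_from(raw_pinning).unwrap();
assert_eq!(pinning, PinningType::ByName);
assert!(PinningType::try_from(2).is_err());
```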

/// Map definition in legacy BPF map declaration style
#[allow(non_camel_case_types)]
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
pub struct bpf_map_def {
    // minimum features required by old BPF programs
    /// The map type
    pub map_type: u32,
    /// The key size
    pub key_size: u32,
    /// The value size
    pub value_size: u32,
    /// Max entry number
    pub max_entries: u32,
    /// Map flags
    pub map_flags: u32,
    // optional features
    /// Id
    pub id: u32,
    /// Pinning type
    pub pinning: PinningType,
}

/// The first five __u32 of `bpf_map_def` must be defined.
pub(crate) const MINIMUM_MAP_SIZE: usize = mem::size_of::<u32>() * 5;

/// Kinds of maps
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum MapKind {
    /// A map holding `.bss` section data
    Bss,
    /// A map holding `.data` section data
    Data,
    /// A map holding `.rodata` section data
    Rodata,
    /// Other maps
    Other,
}

impl From<&str> for MapKind {
    fn from(s: &str) -> Self {
        if s == ".bss" {
            MapKind::Bss
        } else if s.starts_with(".data") {
            MapKind::Data
        } else if s.starts_with(".rodata") {
            MapKind::Rodata
        } else {
            MapKind::Other
        }
    }
}

/// Map data defined in `maps` or `.maps` sections
#[derive(Debug, Clone)]
pub enum Map {
    /// A map defined in the `maps` section
    Legacy(LegacyMap),
    /// A map defined in the `.maps` section
    Btf(BtfMap),
}

impl Map {
    /// Returns the map type
    pub fn map_type(&self) -> u32 {
        match self {
            Map::Legacy(m) => m.def.map_type,
            Map::Btf(m) => m.def.map_type,
        }
    }

    /// Returns the key size in bytes
    pub fn key_size(&self) -> u32 {
        match self {
            Map::Legacy(m) => m.def.key_size,
            Map::Btf(m) => m.def.key_size,
        }
    }

    /// Returns the value size in bytes
    pub fn value_size(&self) -> u32 {
        match self {
            Map::Legacy(m) => m.def.value_size,
            Map::Btf(m) => m.def.value_size,
        }
    }

    /// Returns the max entry number
    pub fn max_entries(&self) -> u32 {
        match self {
            Map::Legacy(m) => m.def.max_entries,
            Map::Btf(m) => m.def.max_entries,
        }
    }

    /// Sets the max entry number
    pub fn set_max_entries(&mut self, v: u32) {
        match self {
            Map::Legacy(m) => m.def.max_entries = v,
            Map::Btf(m) => m.def.max_entries = v,
        }
    }

    /// Returns the map flags
    pub fn map_flags(&self) -> u32 {
        match self {
            Map::Legacy(m) => m.def.map_flags,
            Map::Btf(m) => m.def.map_flags,
        }
    }

    /// Returns the pinning type of the map
    pub fn pinning(&self) -> PinningType {
        match self {
            Map::Legacy(m) => m.def.pinning,
            Map::Btf(m) => m.def.pinning,
        }
    }

    /// Returns the map data
    pub fn data(&self) -> &[u8] {
        match self {
            Map::Legacy(m) => &m.data,
            Map::Btf(m) => &m.data,
        }
    }

    /// Returns the map data as mutable
    pub fn data_mut(&mut self) -> &mut Vec<u8> {
        match self {
            Map::Legacy(m) => m.data.as_mut(),
            Map::Btf(m) => m.data.as_mut(),
        }
    }

    /// Returns the map kind
    pub fn kind(&self) -> MapKind {
        match self {
            Map::Legacy(m) => m.kind,
            Map::Btf(m) => m.kind,
        }
    }

    /// Returns the section index
    pub fn section_index(&self) -> usize {
        match self {
            Map::Legacy(m) => m.section_index,
            Map::Btf(m) => m.section_index,
        }
    }

    /// Returns the symbol index
    pub fn symbol_index(&self) -> usize {
        match self {
            Map::Legacy(m) => m.symbol_index,
            Map::Btf(m) => m.symbol_index,
        }
    }
}

/// A map declared with legacy BPF map declaration style, most likely from a `maps` section.
///
/// See [Drop support for legacy BPF map declaration syntax - Libbpf: the road to v1.0](https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#drop-support-for-legacy-bpf-map-declaration-syntax)
/// for more info.
#[derive(Debug, Clone)]
pub struct LegacyMap {
    /// The definition of the map
    pub def: bpf_map_def,
    /// The section index
    pub section_index: usize,
    /// The symbol index
    pub symbol_index: usize,
    /// The map data
    pub data: Vec<u8>,
    /// The map kind
    pub kind: MapKind,
}

/// A BTF-defined map, most likely from a `.maps` section.
#[derive(Debug, Clone)]
pub struct BtfMap {
    /// The definition of the map
    pub def: BtfMapDef,
    pub(crate) section_index: usize,
    pub(crate) symbol_index: usize,
    pub(crate) kind: MapKind,
    pub(crate) data: Vec<u8>,
}
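A short usage sketch of the section-name mapping above; the section names here are illustrative, not taken from the diff:

```rust
use aya_obj::maps::MapKind;

// ".bss" must match exactly; ".data*" and ".rodata*" match by prefix, so
// mangled names like ".rodata.str1.1" are still recognized.
assert_eq!(MapKind::from(".bss"), MapKind::Bss);
assert_eq!(MapKind::from(".rodata.str1.1"), MapKind::Rodata);
assert_eq!(MapKind::from("maps"), MapKind::Other);
```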
@@ -0,0 +1,48 @@
//! Cgroup socket programs.
use alloc::{borrow::ToOwned, string::String};

use crate::{
    generated::bpf_attach_type,
    thiserror::{self, Error},
};

/// Defines where to attach a `CgroupSock` program.
#[derive(Copy, Clone, Debug, Default)]
pub enum CgroupSockAttachType {
    /// Called after IPv4 bind events.
    PostBind4,
    /// Called after IPv6 bind events.
    PostBind6,
    /// Attach to socket creation events.
    #[default]
    SockCreate,
    /// Attach to socket release events.
    SockRelease,
}

impl From<CgroupSockAttachType> for bpf_attach_type {
    fn from(s: CgroupSockAttachType) -> bpf_attach_type {
        match s {
            CgroupSockAttachType::PostBind4 => bpf_attach_type::BPF_CGROUP_INET4_POST_BIND,
            CgroupSockAttachType::PostBind6 => bpf_attach_type::BPF_CGROUP_INET6_POST_BIND,
            CgroupSockAttachType::SockCreate => bpf_attach_type::BPF_CGROUP_INET_SOCK_CREATE,
            CgroupSockAttachType::SockRelease => bpf_attach_type::BPF_CGROUP_INET_SOCK_RELEASE,
        }
    }
}

#[derive(Debug, Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK program")]
pub(crate) struct InvalidAttachType(String);

impl CgroupSockAttachType {
    pub(crate) fn try_from(value: &str) -> Result<CgroupSockAttachType, InvalidAttachType> {
        match value {
            "post_bind4" => Ok(CgroupSockAttachType::PostBind4),
            "post_bind6" => Ok(CgroupSockAttachType::PostBind6),
            "sock_create" => Ok(CgroupSockAttachType::SockCreate),
            "sock_release" => Ok(CgroupSockAttachType::SockRelease),
            _ => Err(InvalidAttachType(value.to_owned())),
        }
    }
}
@@ -0,0 +1,79 @@
//! Cgroup socket address programs.
use alloc::{borrow::ToOwned, string::String};

use crate::{
    generated::bpf_attach_type,
    thiserror::{self, Error},
};

/// Defines where to attach a `CgroupSockAddr` program.
#[derive(Copy, Clone, Debug)]
pub enum CgroupSockAddrAttachType {
    /// Attach to IPv4 bind events.
    Bind4,
    /// Attach to IPv6 bind events.
    Bind6,
    /// Attach to IPv4 connect events.
    Connect4,
    /// Attach to IPv6 connect events.
    Connect6,
    /// Attach to IPv4 getpeername events.
    GetPeerName4,
    /// Attach to IPv6 getpeername events.
    GetPeerName6,
    /// Attach to IPv4 getsockname events.
    GetSockName4,
    /// Attach to IPv6 getsockname events.
    GetSockName6,
    /// Attach to IPv4 udp_sendmsg events.
    UDPSendMsg4,
    /// Attach to IPv6 udp_sendmsg events.
    UDPSendMsg6,
    /// Attach to IPv4 udp_recvmsg events.
    UDPRecvMsg4,
    /// Attach to IPv6 udp_recvmsg events.
    UDPRecvMsg6,
}

impl From<CgroupSockAddrAttachType> for bpf_attach_type {
    fn from(s: CgroupSockAddrAttachType) -> bpf_attach_type {
        match s {
            CgroupSockAddrAttachType::Bind4 => bpf_attach_type::BPF_CGROUP_INET4_BIND,
            CgroupSockAddrAttachType::Bind6 => bpf_attach_type::BPF_CGROUP_INET6_BIND,
            CgroupSockAddrAttachType::Connect4 => bpf_attach_type::BPF_CGROUP_INET4_CONNECT,
            CgroupSockAddrAttachType::Connect6 => bpf_attach_type::BPF_CGROUP_INET6_CONNECT,
            CgroupSockAddrAttachType::GetPeerName4 => bpf_attach_type::BPF_CGROUP_INET4_GETPEERNAME,
            CgroupSockAddrAttachType::GetPeerName6 => bpf_attach_type::BPF_CGROUP_INET6_GETPEERNAME,
            CgroupSockAddrAttachType::GetSockName4 => bpf_attach_type::BPF_CGROUP_INET4_GETSOCKNAME,
            CgroupSockAddrAttachType::GetSockName6 => bpf_attach_type::BPF_CGROUP_INET6_GETSOCKNAME,
            CgroupSockAddrAttachType::UDPSendMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_SENDMSG,
            CgroupSockAddrAttachType::UDPSendMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_SENDMSG,
            CgroupSockAddrAttachType::UDPRecvMsg4 => bpf_attach_type::BPF_CGROUP_UDP4_RECVMSG,
            CgroupSockAddrAttachType::UDPRecvMsg6 => bpf_attach_type::BPF_CGROUP_UDP6_RECVMSG,
        }
    }
}

#[derive(Debug, Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCK_ADDR program")]
pub(crate) struct InvalidAttachType(String);

impl CgroupSockAddrAttachType {
    pub(crate) fn try_from(value: &str) -> Result<CgroupSockAddrAttachType, InvalidAttachType> {
        match value {
            "bind4" => Ok(CgroupSockAddrAttachType::Bind4),
            "bind6" => Ok(CgroupSockAddrAttachType::Bind6),
            "connect4" => Ok(CgroupSockAddrAttachType::Connect4),
            "connect6" => Ok(CgroupSockAddrAttachType::Connect6),
            "getpeername4" => Ok(CgroupSockAddrAttachType::GetPeerName4),
            "getpeername6" => Ok(CgroupSockAddrAttachType::GetPeerName6),
            "getsockname4" => Ok(CgroupSockAddrAttachType::GetSockName4),
            "getsockname6" => Ok(CgroupSockAddrAttachType::GetSockName6),
            "sendmsg4" => Ok(CgroupSockAddrAttachType::UDPSendMsg4),
            "sendmsg6" => Ok(CgroupSockAddrAttachType::UDPSendMsg6),
            "recvmsg4" => Ok(CgroupSockAddrAttachType::UDPRecvMsg4),
            "recvmsg6" => Ok(CgroupSockAddrAttachType::UDPRecvMsg6),
            _ => Err(InvalidAttachType(value.to_owned())),
        }
    }
}
@@ -0,0 +1,39 @@
//! Cgroup socket option programs.
use alloc::{borrow::ToOwned, string::String};

use crate::{
    generated::bpf_attach_type,
    thiserror::{self, Error},
};

/// Defines where to attach a `CgroupSockopt` program.
#[derive(Copy, Clone, Debug)]
pub enum CgroupSockoptAttachType {
    /// Attach to GetSockopt.
    Get,
    /// Attach to SetSockopt.
    Set,
}

impl From<CgroupSockoptAttachType> for bpf_attach_type {
    fn from(s: CgroupSockoptAttachType) -> bpf_attach_type {
        match s {
            CgroupSockoptAttachType::Get => bpf_attach_type::BPF_CGROUP_GETSOCKOPT,
            CgroupSockoptAttachType::Set => bpf_attach_type::BPF_CGROUP_SETSOCKOPT,
        }
    }
}

#[derive(Debug, Error)]
#[error("{0} is not a valid attach type for a CGROUP_SOCKOPT program")]
pub(crate) struct InvalidAttachType(String);

impl CgroupSockoptAttachType {
    pub(crate) fn try_from(value: &str) -> Result<CgroupSockoptAttachType, InvalidAttachType> {
        match value {
            "getsockopt" => Ok(CgroupSockoptAttachType::Get),
            "setsockopt" => Ok(CgroupSockoptAttachType::Set),
            _ => Err(InvalidAttachType(value.to_owned())),
        }
    }
}
@@ -0,0 +1,9 @@
//! Program struct and type bindings.

pub mod cgroup_sock;
pub mod cgroup_sock_addr;
pub mod cgroup_sockopt;

pub use cgroup_sock::CgroupSockAttachType;
pub use cgroup_sock_addr::CgroupSockAddrAttachType;
pub use cgroup_sockopt::CgroupSockoptAttachType;
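As a small sketch of how these re-exports are meant to be consumed, the `From` impls above turn a parsed attach type into the raw `bpf_attach_type` value; the surrounding loader code is assumed and not part of this diff:

```rust
use aya_obj::generated::bpf_attach_type;
use aya_obj::programs::CgroupSockAddrAttachType;

// The raw value is what a loader would eventually pass as the attach type
// when attaching the program to a cgroup.
let attach: bpf_attach_type = CgroupSockAddrAttachType::Connect4.into();
assert!(matches!(attach, bpf_attach_type::BPF_CGROUP_INET4_CONNECT));
```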
@@ -0,0 +1,12 @@
use core::{mem, slice};

#[cfg(feature = "no_std")]
pub(crate) use hashbrown::HashMap;
#[cfg(not(feature = "no_std"))]
pub(crate) use std::collections::HashMap;

/// Converts a reference to `T` into a byte slice covering the value's memory.
pub(crate) unsafe fn bytes_of<T>(val: &T) -> &[u8] {
    let size = mem::size_of::<T>();
    slice::from_raw_parts(slice::from_ref(val).as_ptr().cast(), size)
}
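A minimal sketch of the kind of crate-internal use `bytes_of` enables; since the helper is `pub(crate)`, this only compiles inside `aya-obj`, and the field values below are illustrative:

```rust
use crate::maps::bpf_map_def;
use crate::util::bytes_of;

// View a repr(C) legacy map definition as raw bytes, e.g. to compare it
// against the raw contents of a `maps` ELF section.
let def = bpf_map_def {
    map_type: 2, // BPF_MAP_TYPE_ARRAY
    key_size: 4,
    value_size: 8,
    max_entries: 1,
    ..Default::default()
};
let raw: &[u8] = unsafe { bytes_of(&def) };
assert_eq!(raw.len(), core::mem::size_of::<bpf_map_def>());
```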
@@ -1,10 +0,0 @@
#[allow(clippy::module_inception)]
mod btf;
mod info;
mod relocation;
mod types;

pub use btf::*;
pub(crate) use info::*;
pub use relocation::RelocationError;
pub(crate) use types::*;
@@ -0,0 +1,264 @@
#!/bin/bash

VERBOSITY=0
TEMP_D=""
DEF_DISK_FORMAT="raw"
DEF_FILESYSTEM="iso9660"
CR="
"

error() { echo "$@" 1>&2; }
fail() { [ $# -eq 0 ] || error "$@"; exit 1; }

Usage() {
    cat <<EOF
Usage: ${0##*/} [ options ] output user-data [meta-data]

   Create a disk for cloud-init to utilize nocloud

   options:
     -h | --help             show usage
     -d | --disk-format D    disk format to output. default: raw
                             can be anything supported by qemu-img or
                             tar, tar-seed-local, tar-seed-net
     -H | --hostname H       set hostname in metadata to H
     -f | --filesystem F     filesystem format (vfat or iso), default: iso9660

     -i | --interfaces F     write network interfaces file into metadata
     -N | --network-config F write network config file to local datasource
     -m | --dsmode M         add 'dsmode' ('local' or 'net') to the metadata
                             default in cloud-init is 'net', meaning network is
                             required.
     -V | --vendor-data F    vendor-data file
     -v | --verbose          increase verbosity

   Note, --dsmode, --hostname, and --interfaces are incompatible
   with metadata.

   Example:
    * cat my-user-data
      #cloud-config
      password: passw0rd
      chpasswd: { expire: False }
      ssh_pwauth: True
    * echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
    * ${0##*/} my-seed.img my-user-data my-meta-data
    * kvm -net nic -net user,hostfwd=tcp::2222-:22 \\
         -drive file=disk1.img,if=virtio -drive file=my-seed.img,if=virtio
    * ssh -p 2222 ubuntu@localhost
EOF
}

bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
cleanup() {
    [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
}

debug() {
    local level=${1}; shift;
    [ "${level}" -gt "${VERBOSITY}" ] && return
    error "${@}"
}

has_cmd() {
    command -v "$1" >/dev/null 2>&1
}

short_opts="hH:i:d:f:m:N:o:V:v"
long_opts="disk-format:,dsmode:,filesystem:,help,hostname:,interfaces:,"
long_opts="${long_opts}network-config:,output:,vendor-data:,verbose"
getopt_out=$(getopt -n "${0##*/}" \
    -o "${short_opts}" -l "${long_opts}" -- "$@") &&
    eval set -- "${getopt_out}" ||
    bad_Usage

## <<insert default variables here>>
output=""
userdata=""
metadata=""
vendordata=""
filesystem=""
diskformat=$DEF_DISK_FORMAT
interfaces=_unset
dsmode=""
hostname=""
ncname="network-config"


while [ $# -ne 0 ]; do
    cur=${1}; next=${2};
    case "$cur" in
        -h|--help) Usage ; exit 0;;
        -d|--disk-format) diskformat=$next; shift;;
        -f|--filesystem) filesystem=$next; shift;;
        -H|--hostname) hostname=$next; shift;;
        -i|--interfaces) interfaces=$next; shift;;
        -N|--network-config) netcfg=$next; shift;;
        -m|--dsmode) dsmode=$next; shift;;
        -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
        -V|--vendor-data) vendordata="$next";;
        --) shift; break;;
    esac
    shift;
done

## check arguments here
## how many args do you expect?
echo $1
echo $2
echo $3
[ $# -ge 2 ] || bad_Usage "must provide output, userdata"
[ $# -le 3 ] || bad_Usage "confused by additional args"

output=$1
userdata=$2
metadata=$3

if [ -n "$metadata" ]; then
    [ "$interfaces" = "_unset" -a -z "$dsmode" -a -z "$hostname" ] ||
        fail "metadata is incompatible with:" \
            "--interfaces, --hostname, --dsmode"
fi

case "$diskformat" in
    tar|tar-seed-local|tar-seed-net)
        if [ "${filesystem:-tar}" != "tar" ]; then
            fail "diskformat=tar is incompatible with filesystem"
        fi
        filesystem="$diskformat"
        ;;
    tar*)
        fail "supported 'tar' formats are tar, tar-seed-local, tar-seed-net"
esac

if [ -z "$filesystem" ]; then
    filesystem="$DEF_FILESYSTEM"
fi
if [ "$filesystem" = "iso" ]; then
    filesystem="iso9660"
fi

case "$filesystem" in
    tar*)
        has_cmd tar ||
            fail "missing 'tar'. Required for --filesystem=$filesystem";;
    vfat)
        has_cmd mkfs.vfat ||
            fail "missing 'mkfs.vfat'. Required for --filesystem=vfat."
        has_cmd mcopy ||
            fail "missing 'mcopy'. Required for --filesystem=vfat."
        ;;
    iso9660)
        has_cmd mkisofs ||
            fail "missing 'mkisofs'. Required for --filesystem=iso9660."
        ;;
    *) fail "unknown filesystem $filesystem";;
esac

case "$diskformat" in
    tar*|raw) :;;
    *) has_cmd "qemu-img" ||
        fail "missing 'qemu-img'. Required for --disk-format=$diskformat."
esac

[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
    fail "$interfaces: not a readable file"

TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
    fail "failed to make tempdir"
trap cleanup EXIT

files=( "${TEMP_D}/user-data" "${TEMP_D}/meta-data" )
if [ -n "$metadata" ]; then
    cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
else
    instance_id="iid-local01"
    iface_data=""
    [ "$interfaces" != "_unset" ] &&
        iface_data=$(sed ':a;N;$!ba;s/\n/\\n/g' "$interfaces")

    # write json formatted user-data (json is a subset of yaml)
    mdata=""
    for kv in "instance-id:$instance_id" "local-hostname:$hostname" \
        "interfaces:${iface_data}" "dsmode:$dsmode"; do
        key=${kv%%:*}
        val=${kv#*:}
        [ -n "$val" ] || continue
        mdata="${mdata:+${mdata},${CR}}\"$key\": \"$val\""
    done
    printf "{\n%s\n}\n" "$mdata" > "${TEMP_D}/meta-data"
fi

if [ -n "$netcfg" ]; then
    cp "$netcfg" "${TEMP_D}/$ncname" ||
        fail "failed to copy network config"
    files[${#files[@]}]="$TEMP_D/$ncname"
fi

if [ -n "$vendordata" ]; then
    cp "$vendordata" "${TEMP_D}/vendor-data" ||
        fail "failed to copy vendor data"
    files[${#files[@]}]="$TEMP_D/vendor-data"
fi

files_rel=( )
for f in "${files[@]}"; do
    files_rel[${#files_rel[@]}]="${f#${TEMP_D}/}"
done

if [ "$userdata" = "-" ]; then
    cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
else
    cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
fi

## alternatively, create a vfat filesystem with same files
img="$TEMP_D/seed-data"
tar_opts=( --owner=root --group=root )

case "$filesystem" in
    tar)
        tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${files_rel[@]}" ||
            fail "failed to create tarball of ${files_rel[*]}"
        ;;
    tar-seed-local|tar-seed-net)
        if [ "$filesystem" = "tar-seed-local" ]; then
            path="var/lib/cloud/seed/nocloud"
        else
            path="var/lib/cloud/seed/nocloud-net"
        fi
        mkdir -p "${TEMP_D}/${path}" ||
            fail "failed making path for seed files"
        mv "${files[@]}" "${TEMP_D}/$path" ||
            fail "failed moving files"
        tar "${tar_opts[@]}" -C "${TEMP_D}" -cf "$img" "${path}" ||
            fail "failed to create tarball with $path"
        ;;
    iso9660)
        mkisofs -output "$img" -volid cidata \
            -joliet -rock "${files[@]}" > "$TEMP_D/err" 2>&1 ||
            { cat "$TEMP_D/err" 1>&2; fail "failed to mkisofs"; }
        ;;
    vfat)
        truncate -s 128K "$img" || fail "failed truncate image"
        out=$(mkfs.vfat -n cidata "$img" 2>&1) ||
            { error "failed: mkfs.vfat -n cidata $img"; error "$out"; }
        mcopy -oi "$img" "${files[@]}" :: ||
            fail "failed to copy user-data, meta-data to img"
        ;;
esac

[ "$output" = "-" ] && output="$TEMP_D/final"
if [ "${diskformat#tar}" != "$diskformat" -o "$diskformat" = "raw" ]; then
    cp "$img" "$output" ||
        fail "failed to copy image to $output"
else
    qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
        fail "failed to convert to disk format $diskformat"
fi

[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
    fail "failed to write to -"

debug 1 "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
# vi: ts=4 noexpandtab
@@ -1,74 +1,21 @@
use log::info;
use libtest_mimic::{Arguments, Trial};

mod tests;
use tests::IntegrationTest;

use clap::Parser;

#[derive(Debug, Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
pub struct RunOptions {
    #[clap(short, long, value_parser)]
    tests: Option<Vec<String>>,
}

#[derive(Debug, Parser)]
struct Cli {
    #[clap(subcommand)]
    command: Option<Command>,
}

#[derive(Debug, Parser)]
enum Command {
    /// Run one or more tests: ... -- run -t test1 -t test2
    Run(RunOptions),
    /// List all the tests: ... -- list
    List,
}

macro_rules! exec_test {
    ($test:expr) => {{
        info!("Running {}", $test.name);
        ($test.test_fn)();
    }};
}

macro_rules! exec_all_tests {
    () => {{
        for t in inventory::iter::<IntegrationTest> {
            exec_test!(t)
        }
    }};
}

fn main() -> anyhow::Result<()> {
fn main() {
    env_logger::init();

    let cli = Cli::parse();

    match &cli.command {
        Some(Command::Run(opts)) => match &opts.tests {
            Some(tests) => {
                for t in inventory::iter::<IntegrationTest> {
                    if tests.contains(&t.name.into()) {
                        exec_test!(t)
                    }
                }
            }
            None => {
                exec_all_tests!()
            }
        },
        Some(Command::List) => {
            for t in inventory::iter::<IntegrationTest> {
                info!("{}", t.name);
            }
        }
        None => {
            exec_all_tests!()
        }
    }

    Ok(())
    let mut args = Arguments::from_args();
    // Force to run single-threaded
    args.test_threads = Some(1);
    let tests = inventory::iter::<IntegrationTest>
        .into_iter()
        .map(|test| {
            Trial::test(test.name, move || {
                (test.test_fn)();
                Ok(())
            })
        })
        .collect();
    libtest_mimic::run(&args, tests).exit();
}
@@ -0,0 +1,114 @@
use core::{mem::size_of, ptr::null_mut, slice::from_raw_parts};
use std::collections::HashMap;

use aya::include_bytes_aligned;
use aya_obj::{generated::bpf_insn, Object, ProgramSection};

use super::{integration_test, IntegrationTest};

#[integration_test]
fn run_with_rbpf() {
    let bytes = include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/pass");
    let object = Object::parse(bytes).unwrap();

    assert_eq!(object.programs.len(), 1);
    assert!(matches!(
        object.programs["pass"].section,
        ProgramSection::Xdp { .. }
    ));
    assert_eq!(object.programs["pass"].section.name(), "pass");

    let instructions = &object.programs["pass"].function.instructions;
    let data = unsafe {
        from_raw_parts(
            instructions.as_ptr() as *const u8,
            instructions.len() * size_of::<bpf_insn>(),
        )
    };
    // Use rbpf interpreter instead of JIT compiler to ensure platform compatibility.
    let vm = rbpf::EbpfVmNoData::new(Some(data)).unwrap();
    const XDP_PASS: u64 = 2;
    assert_eq!(vm.execute_program().unwrap(), XDP_PASS);
}

static mut MULTIMAP_MAPS: [*mut Vec<u64>; 2] = [null_mut(), null_mut()];

#[integration_test]
fn use_map_with_rbpf() {
    let bytes =
        include_bytes_aligned!("../../../../target/bpfel-unknown-none/debug/multimap-btf.bpf.o");
    let mut object = Object::parse(bytes).unwrap();

    assert_eq!(object.programs.len(), 1);
    assert!(matches!(
        object.programs["tracepoint"].section,
        ProgramSection::TracePoint { .. }
    ));
    assert_eq!(object.programs["tracepoint"].section.name(), "tracepoint");

    // Initialize maps:
    // - fd: 0xCAFE00 or 0xCAFE01 (the 0xCAFE00 part is used to distinguish fds from indices),
    // - Note that rbpf does not convert fds into real pointers,
    //   so we keep the pointers to our maps in MULTIMAP_MAPS, to be used in helpers.
    let mut maps = HashMap::new();
    let mut map_instances = vec![vec![0u64], vec![0u64]];
    for (name, map) in object.maps.iter() {
        assert_eq!(map.key_size(), size_of::<u32>() as u32);
        assert_eq!(map.value_size(), size_of::<u64>() as u32);
        assert_eq!(
            map.map_type(),
            aya_obj::generated::bpf_map_type::BPF_MAP_TYPE_ARRAY as u32
        );

        let map_id = if name == "map_1" { 0 } else { 1 };
        let fd = map_id as i32 | 0xCAFE00;
        maps.insert(name.to_owned(), (fd, map.clone()));

        unsafe {
            MULTIMAP_MAPS[map_id] = &mut map_instances[map_id] as *mut _;
        }
    }

    object
        .relocate_maps(
            maps.iter()
                .map(|(s, (fd, map))| (s.as_ref() as &str, Some(*fd), map)),
        )
        .expect("Relocation failed");
    // Actually there is no local function call involved.
    object.relocate_calls().unwrap();

    // Execute the program
    assert_eq!(object.programs.len(), 1);
    let instructions = &object.programs["tracepoint"].function.instructions;
    let data = unsafe {
        from_raw_parts(
            instructions.as_ptr() as *const u8,
            instructions.len() * size_of::<bpf_insn>(),
        )
    };
    let mut vm = rbpf::EbpfVmNoData::new(Some(data)).unwrap();
    vm.register_helper(2, bpf_map_update_elem_multimap)
        .expect("Helper failed");
    assert_eq!(vm.execute_program().unwrap(), 0);

    assert_eq!(map_instances[0][0], 24);
    assert_eq!(map_instances[1][0], 42);

    unsafe {
        MULTIMAP_MAPS[0] = null_mut();
        MULTIMAP_MAPS[1] = null_mut();
    }
}

fn bpf_map_update_elem_multimap(map: u64, key: u64, value: u64, _: u64, _: u64) -> u64 {
    assert!(map == 0xCAFE00 || map == 0xCAFE01);
    let key = *unsafe { (key as usize as *const u32).as_ref().unwrap() };
    let value = *unsafe { (value as usize as *const u64).as_ref().unwrap() };
    assert_eq!(key, 0);
    unsafe {
        let map_instance = MULTIMAP_MAPS[map as usize & 0xFF].as_mut().unwrap();
        map_instance[0] = value;
    }
    0
}
@@ -0,0 +1,313 @@
use anyhow::{Context, Result};
use std::{path::PathBuf, process::Command, thread::sleep, time::Duration};
use tempfile::TempDir;

use aya::{maps::Array, programs::TracePoint, BpfLoader, Btf, Endianness};

use super::{integration_test, IntegrationTest};

// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no
// special meaning, they just have "nice" bit patterns that can be helpful while debugging.

#[integration_test]
fn relocate_field() {
    let test = RelocationTest {
        local_definition: r#"
            struct foo {
              __u8 a;
              __u8 b;
              __u8 c;
              __u8 d;
            };
        "#,
        target_btf: r#"
            struct foo {
              __u8 a;
              __u8 c;
              __u8 b;
              __u8 d;
            } s1;
        "#,
        relocation_code: r#"
            __u8 memory[] = {1, 2, 3, 4};
            struct foo *ptr = (struct foo *) &memory;
            value = __builtin_preserve_access_index(ptr->c);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap(), 2);
    assert_eq!(test.run_no_btf().unwrap(), 3);
}

#[integration_test]
fn relocate_enum() {
    let test = RelocationTest {
        local_definition: r#"
            enum foo { D = 0xAAAAAAAA };
        "#,
        target_btf: r#"
            enum foo { D = 0xBBBBBBBB } e1;
        "#,
        relocation_code: r#"
            #define BPF_ENUMVAL_VALUE 1
            value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap(), 0xBBBBBBBB);
    assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA);
}

#[integration_test]
fn relocate_enum_signed() {
    let test = RelocationTest {
        local_definition: r#"
            enum foo { D = -0x7AAAAAAA };
        "#,
        target_btf: r#"
            enum foo { D = -0x7BBBBBBB } e1;
        "#,
        relocation_code: r#"
            #define BPF_ENUMVAL_VALUE 1
            value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64);
    assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64);
}

#[integration_test]
fn relocate_enum64() {
    let test = RelocationTest {
        local_definition: r#"
            enum foo { D = 0xAAAAAAAABBBBBBBB };
        "#,
        target_btf: r#"
            enum foo { D = 0xCCCCCCCCDDDDDDDD } e1;
        "#,
        relocation_code: r#"
            #define BPF_ENUMVAL_VALUE 1
            value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD);
    assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB);
}

#[integration_test]
fn relocate_enum64_signed() {
    let test = RelocationTest {
        local_definition: r#"
            enum foo { D = -0xAAAAAAABBBBBBBB };
        "#,
        target_btf: r#"
            enum foo { D = -0xCCCCCCCDDDDDDDD } e1;
        "#,
        relocation_code: r#"
            #define BPF_ENUMVAL_VALUE 1
            value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64);
    assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64);
}

#[integration_test]
fn relocate_pointer() {
    let test = RelocationTest {
        local_definition: r#"
            struct foo {};
            struct bar { struct foo *f; };
        "#,
        target_btf: r#"
            struct foo {};
            struct bar { struct foo *f; };
        "#,
        relocation_code: r#"
            __u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0};
            struct bar* ptr = (struct bar *) &memory;
            value = (__u64) __builtin_preserve_access_index(ptr->f);
        "#,
    }
    .build()
    .unwrap();
    assert_eq!(test.run().unwrap(), 42);
    assert_eq!(test.run_no_btf().unwrap(), 42);
}

/// Utility code for running relocation tests:
/// - Generates the eBPF program using the provided local definition and relocation code
/// - Generates the BTF from the target btf code
struct RelocationTest {
    /// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode
    local_definition: &'static str,
    /// Target data structure definition. What the vmlinux would actually contain.
    target_btf: &'static str,
    /// Code executed by the eBPF program to test the relocation.
    /// The format should be:
    // __u8 memory[] = { ... };
    // __u32 value = BPF_CORE_READ((struct foo *)&memory, ...);
    //
    // The generated code will be executed by attaching a tracepoint to sched_switch
    // and emitting `__u32 value` into a map. See the code template below for more details.
    relocation_code: &'static str,
}

impl RelocationTest {
    /// Build a RelocationTestRunner
    fn build(&self) -> Result<RelocationTestRunner> {
        Ok(RelocationTestRunner {
            ebpf: self.build_ebpf()?,
            btf: self.build_btf()?,
        })
    }

    /// - Generate the source eBPF filling a template
    /// - Compile it with clang
    fn build_ebpf(&self) -> Result<Vec<u8>> {
        let local_definition = self.local_definition;
        let relocation_code = self.relocation_code;
        let (_tmp_dir, compiled_file) = compile(&format!(
            r#"
            #include <linux/bpf.h>

            static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2;

            {local_definition}

            struct {{
              int (*type)[BPF_MAP_TYPE_ARRAY];
              __u32 *key;
              __u64 *value;
              int (*max_entries)[1];
            }} output_map
            __attribute__((section(".maps"), used));

            __attribute__((section("tracepoint/bpf_prog"), used))
            int bpf_prog(void *ctx) {{
                __u32 key = 0;
                __u64 value = 0;
                {relocation_code}
                bpf_map_update_elem(&output_map, &key, &value, BPF_ANY);
                return 0;
            }}

            char _license[] __attribute__((section("license"), used)) = "GPL";
            "#
        ))
        .context("Failed to compile eBPF program")?;
        let bytecode =
            std::fs::read(compiled_file).context("Error reading compiled eBPF program")?;
        Ok(bytecode)
    }

    /// - Generate the target BTF source with a mock main()
    /// - Compile it with clang
    /// - Extract the BTF with llvm-objcopy
    fn build_btf(&self) -> Result<Btf> {
        let target_btf = self.target_btf;
        let relocation_code = self.relocation_code;
        // BTF files can be generated and inspected with these commands:
        // $ clang -c -g -O2 -target bpf target.c
        // $ pahole --btf_encode_detached=target.btf -V target.o
        // $ bpftool btf dump file ./target.btf format c
        let (tmp_dir, compiled_file) = compile(&format!(
            r#"
            #include <linux/bpf.h>

            {target_btf}
            int main() {{
                __u64 value = 0;
                // This is needed to make sure to emit BTF for the defined types,
                // it could be dead code eliminated if we don't.
                {relocation_code};
                return value;
            }}
            "#
        ))
        .context("Failed to compile BTF")?;
        Command::new("llvm-objcopy")
            .current_dir(tmp_dir.path())
            .args(["--dump-section", ".BTF=target.btf"])
            .arg(compiled_file)
            .status()
            .context("Failed to run llvm-objcopy")?
            .success()
            .then_some(())
            .context("Failed to extract BTF")?;
        let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default())
            .context("Error parsing generated BTF")?;
        Ok(btf)
    }
}

/// Compile an eBPF program and return the path of the compiled object.
/// Also returns a TempDir handle; dropping it will remove the created directory.
fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> {
    let tmp_dir = tempfile::tempdir().context("Error making temp dir")?;
    let source = tmp_dir.path().join("source.c");
    std::fs::write(&source, source_code).context("Writing bpf program failed")?;
    Command::new("clang")
        .current_dir(&tmp_dir)
        .args(["-c", "-g", "-O2", "-target", "bpf"])
        .arg(&source)
        .status()
        .context("Failed to run clang")?
        .success()
        .then_some(())
        .context("Failed to compile eBPF source")?;
    Ok((tmp_dir, source.with_extension("o")))
}

struct RelocationTestRunner {
    ebpf: Vec<u8>,
    btf: Btf,
}

impl RelocationTestRunner {
    /// Run test and return the output value
    fn run(&self) -> Result<u64> {
        self.run_internal(true).context("Error running with BTF")
    }

    /// Run without loading btf
    fn run_no_btf(&self) -> Result<u64> {
        self.run_internal(false)
            .context("Error running without BTF")
    }

    fn run_internal(&self, with_relocations: bool) -> Result<u64> {
        let mut loader = BpfLoader::new();
        if with_relocations {
            loader.btf(Some(&self.btf));
        } else {
            loader.btf(None);
        }
        let mut bpf = loader.load(&self.ebpf).context("Loading eBPF failed")?;
        let program: &mut TracePoint = bpf
            .program_mut("bpf_prog")
            .context("bpf_prog not found")?
            .try_into()
            .context("program not a tracepoint")?;
        program.load().context("Loading tracepoint failed")?;
        // Attach to sched_switch and wait some time to make sure it executed at least once
        program
            .attach("sched", "sched_switch")
            .context("attach failed")?;
        sleep(Duration::from_millis(1000));
        // To inspect the loaded eBPF bytecode, increase the timeout and run:
        // $ sudo bpftool prog dump xlated name bpf_prog

        let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap();
        let key = 0;
        output_map.get(&key, 0).context("Getting key 0 failed")
    }
}