Merge pull request #644 from aya-rs/build-script

Steps toward hermetic integration tests
Tamir Duberstein committed 1 year ago (via GitHub)
commit 7def6d7218

@@ -9,11 +9,6 @@ jobs:
     steps:
       - uses: actions/checkout@v3
-      - uses: actions/checkout@v3
-        with:
-          repository: libbpf/libbpf
-          path: libbpf
       - name: libbpf-version
         working-directory: libbpf
        run: echo "LIBBPF_SHA=$(git rev-parse HEAD)" >> $GITHUB_ENV
@@ -32,7 +27,7 @@ jobs:
      - name: Run codegen
        run: |
-          cargo xtask codegen --libbpf-dir ./libbpf
+          cargo xtask codegen
      - name: Check for changes
        run: |

@@ -17,11 +17,9 @@ jobs:
    runs-on: macos-latest
    steps:
-      - uses: actions/checkout@v3
      - uses: actions/checkout@v3
        with:
-          repository: libbpf/libbpf
-          path: libbpf
+          submodules: recursive
      - name: Install Pre-requisites
        run: |
@@ -40,4 +38,4 @@ jobs:
          key: tmp-files-${{ hashFiles('test/run.sh') }}
      - name: Run integration tests
-        run: test/run.sh ./libbpf
+        run: test/run.sh

@@ -29,7 +29,7 @@ jobs:
        run: cargo fmt --all -- --check
      - name: Run clippy
-        run: cargo clippy --all-targets --workspace --exclude integration-test -- --deny warnings
+        run: cargo clippy --all-targets --workspace -- --deny warnings
      - name: Run miri
        run: cargo miri test --all-targets

.gitignore

@@ -1,6 +1,5 @@
 Cargo.lock
 target/
-libbpf/
 .vscode/
 !.vscode/settings.json
 site/

.gitmodules

@@ -0,0 +1,3 @@
[submodule "libbpf"]
path = libbpf
url = https://github.com/libbpf/libbpf

@@ -1,4 +1,4 @@
 {
-    "rust-analyzer.checkOnSave.allTargets": false,
-    "rust-analyzer.checkOnSave.command": "clippy"
+    "rust-analyzer.check.allTargets": true,
+    "rust-analyzer.check.command": "clippy"
 }

@@ -299,7 +299,7 @@ macro_rules! include_bytes_aligned {
            pub bytes: Bytes,
        }

-        static ALIGNED: &Aligned<[u8]> = &Aligned {
+        const ALIGNED: &Aligned<[u8]> = &Aligned {
            _align: [],
            bytes: *include_bytes!($path),
        };

@@ -1,6 +0,0 @@
[build]
target-dir = "../target"
target = "bpfel-unknown-none"
[unstable]
build-std = ["core"]

@@ -0,0 +1 @@
Subproject commit a2258003f21d9d52afd48aa64787b65ef80bd355

@@ -30,13 +30,13 @@ From the root of this repository:
 ### Native

 ```
-cargo xtask integration-test --libbpf-dir /path/to/libbpf
+cargo xtask integration-test
 ```

 ### Virtualized

 ```
-./test/run.sh /path/to/libbpf
+./test/run.sh
 ```

 ### Writing an integration test
@@ -44,10 +44,11 @@ cargo xtask integration-test --libbpf-dir /path/to/libbpf
 Tests should follow these guidelines:

 - Rust eBPF code should live in `integration-ebpf/${NAME}.rs` and included in
-  `integration-ebpf/Cargo.toml`.
-- C eBPF code should live in `integration-ebpf/src/bpf/${NAME}.bpf.c`. It's automatically compiled
-  and made available as `${OUT_DIR}/${NAME}.bpf.o`.
-- Any bytecode should be included in the integration test binary using `include_bytes_aligned!`.
+  `integration-ebpf/Cargo.toml` and `integration-test/src/lib.rs` using
+  `include_bytes_aligned!`.
+- C eBPF code should live in `integration-test/bpf/${NAME}.bpf.c`. It should be
+  added to the list of files in `integration-test/build.rs` and the list of
+  constants in `integration-test/src/lib.rs` using `include_bytes_aligned!`.
 - Tests should be added to `integration-test/tests`.
 - You may add a new module, or use an existing one.
 - Test functions should not return `anyhow::Result<()>` since this produces errors without stack
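As a rough sketch (not part of this change), here is what the wiring described in the guidelines above looks like for a hypothetical `my_probe.bpf.c` probe. The constant name, program name, and program type below are invented for illustration; the `include_bytes_aligned!`/`OUT_DIR` pattern mirrors the constants this commit adds to `integration-test/src/lib.rs`.

```rust
// integration-test/src/lib.rs — hypothetical constant for the compiled object.
// Assumes `use aya::include_bytes_aligned;` at the top of lib.rs, as in this commit,
// and that build.rs places the clang output at ${OUT_DIR}/my_probe.bpf.o.
pub const MY_PROBE: &[u8] =
    include_bytes_aligned!(concat!(env!("OUT_DIR"), "/my_probe.bpf.o"));

// In a test module — hypothetical test consuming the constant.
#[test]
fn my_probe_loads() {
    // The aligned bytes go straight to the loader; no hard-coded path into target/.
    let mut bpf = aya::Bpf::load(crate::MY_PROBE).unwrap();
    let prog: &mut aya::programs::Xdp =
        bpf.program_mut("my_probe").unwrap().try_into().unwrap();
    prog.load().unwrap();
}
```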

@@ -1,6 +0,0 @@
[build]
target-dir = "../../target"
target = "bpfel-unknown-none"
[unstable]
build-std = ["core"]

@@ -1,2 +0,0 @@
[toolchain]
channel="nightly"

@@ -18,5 +18,7 @@ object = { version = "0.31", default-features = false, features = [
     "elf",
 ] }
 rbpf = "0.2.0"
-tempfile = "3.3.0"
 tokio = { version = "1.24", default-features = false, features = ["time"] }
+
+[build-dependencies]
+cargo_metadata = "0.15.4"

@@ -0,0 +1,196 @@
use std::{
env,
ffi::OsString,
fmt::Write as _,
fs,
io::BufReader,
path::PathBuf,
process::{Child, Command, Stdio},
};
use cargo_metadata::{
Artifact, CompilerMessage, Message, Metadata, MetadataCommand, Package, Target,
};
fn main() {
const AYA_BUILD_INTEGRATION_BPF: &str = "AYA_BUILD_INTEGRATION_BPF";
println!("cargo:rerun-if-env-changed={}", AYA_BUILD_INTEGRATION_BPF);
let build_integration_bpf = match env::var_os(AYA_BUILD_INTEGRATION_BPF) {
None => false,
Some(s) => {
let s = s.to_str().unwrap();
s.parse::<bool>().unwrap()
}
};
let manifest_dir = env::var_os("CARGO_MANIFEST_DIR").unwrap();
let manifest_dir = PathBuf::from(manifest_dir);
let out_dir = env::var_os("OUT_DIR").unwrap();
let out_dir = PathBuf::from(out_dir);
let endian = env::var_os("CARGO_CFG_TARGET_ENDIAN").unwrap();
let target = if endian == "big" {
"bpfeb"
} else if endian == "little" {
"bpfel"
} else {
panic!("unsupported endian={:?}", endian)
};
const C_BPF_PROBES: &[(&str, &str)] = &[
("ext.bpf.c", "ext.bpf.o"),
("main.bpf.c", "main.bpf.o"),
("multimap-btf.bpf.c", "multimap-btf.bpf.o"),
("text_64_64_reloc.c", "text_64_64_reloc.o"),
];
let c_bpf_probes = C_BPF_PROBES
.iter()
.map(|(src, dst)| (src, out_dir.join(dst)));
if build_integration_bpf {
let libbpf_dir = manifest_dir
.parent()
.unwrap()
.parent()
.unwrap()
.join("libbpf");
let libbpf_headers_dir = out_dir.join("libbpf_headers");
let mut includedir = OsString::new();
includedir.push("INCLUDEDIR=");
includedir.push(&libbpf_headers_dir);
let mut cmd = Command::new("make");
cmd.arg("-C")
.arg(libbpf_dir.join("src"))
.arg(includedir)
.arg("install_headers");
let status = cmd
.status()
.unwrap_or_else(|err| panic!("failed to run {cmd:?}: {err}"));
match status.code() {
Some(code) => match code {
0 => {}
code => panic!("{cmd:?} exited with code {code}"),
},
None => panic!("{cmd:?} terminated by signal"),
}
let bpf_dir = manifest_dir.join("bpf");
let mut target_arch = OsString::new();
target_arch.push("-D__TARGET_ARCH_");
let arch = env::var_os("CARGO_CFG_TARGET_ARCH").unwrap();
if arch == "x86_64" {
target_arch.push("x86");
} else if arch == "aarch64" {
target_arch.push("arm64");
} else {
target_arch.push(arch);
};
for (src, dst) in c_bpf_probes {
let src = bpf_dir.join(src);
let mut cmd = Command::new("clang");
cmd.arg("-I")
.arg(&libbpf_headers_dir)
.args(["-g", "-O2", "-target", target, "-c"])
.arg(&target_arch)
.arg(src)
.arg("-o")
.arg(dst);
let status = cmd
.status()
.unwrap_or_else(|err| panic!("failed to run {cmd:?}: {err}"));
match status.code() {
Some(code) => match code {
0 => {}
code => panic!("{cmd:?} exited with code {code}"),
},
None => panic!("{cmd:?} terminated by signal"),
}
}
let ebpf_dir = manifest_dir.parent().unwrap().join("integration-ebpf");
let target = format!("{target}-unknown-none");
let mut cmd = Command::new("cargo");
cmd.current_dir(&ebpf_dir).args([
"build",
"-Z",
"build-std=core",
"--release",
"--message-format=json",
"--target",
&target,
]);
let mut child = cmd
.stdout(Stdio::piped())
.spawn()
.unwrap_or_else(|err| panic!("failed to spawn {cmd:?}: {err}"));
let Child { stdout, .. } = &mut child;
let stdout = stdout.take().unwrap();
let reader = BufReader::new(stdout);
let mut executables = Vec::new();
let mut compiler_messages = String::new();
for message in Message::parse_stream(reader) {
#[allow(clippy::collapsible_match)]
match message.expect("valid JSON") {
Message::CompilerArtifact(Artifact {
executable,
target: Target { name, .. },
..
}) => {
if let Some(executable) = executable {
executables.push((name, executable.into_std_path_buf()));
}
}
Message::CompilerMessage(CompilerMessage { message, .. }) => {
writeln!(&mut compiler_messages, "{message}").unwrap()
}
_ => {}
}
}
let status = child
.wait()
.unwrap_or_else(|err| panic!("failed to wait for {cmd:?}: {err}"));
match status.code() {
Some(code) => match code {
0 => {}
code => panic!("{cmd:?} exited with status code {code}:\n{compiler_messages}"),
},
None => panic!("{cmd:?} terminated by signal"),
}
for (name, binary) in executables {
let dst = out_dir.join(name);
let _: u64 = fs::copy(&binary, &dst)
.unwrap_or_else(|err| panic!("failed to copy {binary:?} to {dst:?}: {err}"));
}
} else {
for (_src, dst) in c_bpf_probes {
fs::write(&dst, []).unwrap_or_else(|err| panic!("failed to create {dst:?}: {err}"));
}
let Metadata { packages, .. } = MetadataCommand::new().no_deps().exec().unwrap();
for Package { name, targets, .. } in packages {
if name != "integration-ebpf" {
continue;
}
for Target { name, kind, .. } in targets {
if kind != ["bin"] {
continue;
}
let dst = out_dir.join(name);
fs::write(&dst, []).unwrap_or_else(|err| panic!("failed to create {dst:?}: {err}"));
}
}
}
}

@@ -1 +1,20 @@
use aya::include_bytes_aligned;
pub const EXT: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/ext.bpf.o"));
pub const MAIN: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/main.bpf.o"));
pub const MULTIMAP_BTF: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/multimap-btf.bpf.o"));
pub const TEXT_64_64_RELOC: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/text_64_64_reloc.o"));
pub const LOG: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/log"));
pub const MAP_TEST: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/map_test"));
pub const NAME_TEST: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/name_test"));
pub const PASS: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/pass"));
pub const TEST: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/test"));
pub const RELOCATIONS: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/relocations"));
pub const BPF_PROBE_READ: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/bpf_probe_read"));
#[cfg(test)]
mod tests;

@@ -0,0 +1,8 @@
mod bpf_probe_read;
mod btf_relocations;
mod elf;
mod load;
mod log;
mod rbpf;
mod relocations;
mod smoke;

@@ -1,4 +1,4 @@
-use aya::{include_bytes_aligned, maps::Array, programs::UProbe, Bpf};
+use aya::{maps::Array, programs::UProbe, Bpf};

 const RESULT_BUF_LEN: usize = 1024;
@@ -68,7 +68,7 @@ fn set_user_buffer(bytes: &[u8], dest_len: usize) -> Bpf {
     let bpf = load_and_attach_uprobe(
         "test_bpf_probe_read_user_str_bytes",
         "trigger_bpf_probe_read_user",
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/bpf_probe_read"),
+        crate::BPF_PROBE_READ,
     );
     trigger_bpf_probe_read_user(bytes.as_ptr(), dest_len);
     bpf
@@ -78,7 +78,7 @@ fn set_kernel_buffer(bytes: &[u8], dest_len: usize) -> Bpf {
     let mut bpf = load_and_attach_uprobe(
         "test_bpf_probe_read_kernel_str_bytes",
         "trigger_bpf_probe_read_kernel",
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/bpf_probe_read"),
+        crate::BPF_PROBE_READ,
     );
     set_kernel_buffer_element(&mut bpf, bytes);
     trigger_bpf_probe_read_kernel(dest_len);

@@ -1,6 +1,9 @@
-use anyhow::{bail, Context as _, Result};
-use std::{path::PathBuf, process::Command, thread::sleep, time::Duration};
-use tempfile::TempDir;
+use anyhow::{anyhow, bail, Context as _, Result};
+use std::{
+    process::{Child, ChildStdout, Command, Stdio},
+    thread::sleep,
+    time::Duration,
+};

 use aya::{maps::Array, programs::TracePoint, util::KernelVersion, BpfLoader, Btf, Endianness};
@@ -215,9 +218,15 @@
     /// - Generate the source eBPF filling a template
     /// - Compile it with clang
     fn build_ebpf(&self) -> Result<Vec<u8>> {
-        let local_definition = self.local_definition;
-        let relocation_code = self.relocation_code;
-        let (_tmp_dir, compiled_file) = compile(&format!(
+        use std::io::Read as _;
+
+        let Self {
+            local_definition,
+            relocation_code,
+            ..
+        } = self;
+        let mut stdout = compile(&format!(
            r#"

#include <linux/bpf.h>
@@ -250,23 +259,29 @@
char _license[] __attribute__((section("license"), used)) = "GPL";
            "#
        ))
-        .context("Failed to compile eBPF program")?;
-        let bytecode =
-            std::fs::read(compiled_file).context("Error reading compiled eBPF program")?;
-        Ok(bytecode)
+        .context("failed to compile eBPF program")?;
+        let mut output = Vec::new();
+        stdout.read_to_end(&mut output)?;
+        Ok(output)
     }

     /// - Generate the target BTF source with a mock main()
     /// - Compile it with clang
     /// - Extract the BTF with llvm-objcopy
     fn build_btf(&self) -> Result<Btf> {
-        let target_btf = self.target_btf;
-        let relocation_code = self.relocation_code;
+        use std::io::Read as _;
+
+        let Self {
+            target_btf,
+            relocation_code,
+            ..
+        } = self;
         // BTF files can be generated and inspected with these commands:
         // $ clang -c -g -O2 -target bpf target.c
         // $ pahole --btf_encode_detached=target.btf -V target.o
         // $ bpftool btf dump file ./target.btf format c
-        let (tmp_dir, compiled_file) = compile(&format!(
+        let stdout = compile(&format!(
            r#"

#include <linux/bpf.h>
@@ -280,14 +295,20 @@
}}
            "#
        ))
-        .context("Failed to compile BTF")?;
+        .context("failed to compile BTF")?;
         let mut cmd = Command::new("llvm-objcopy");
-        cmd.current_dir(tmp_dir.path())
-            .args(["--dump-section", ".BTF=target.btf"])
-            .arg(compiled_file);
-        let status = cmd
-            .status()
-            .with_context(|| format!("Failed to run {cmd:?}"))?;
+        cmd.args(["--dump-section", ".BTF=-", "-"])
+            .stdin(stdout)
+            .stdout(Stdio::piped());
+        let mut child = cmd
+            .spawn()
+            .with_context(|| format!("failed to spawn {cmd:?}"))?;
+        let Child { stdout, .. } = &mut child;
+        let mut stdout = stdout.take().ok_or(anyhow!("failed to open stdout"))?;
+        let status = child
+            .wait()
+            .with_context(|| format!("failed to wait for {cmd:?}"))?;
         match status.code() {
             Some(code) => match code {
                 0 => {}
@@ -295,25 +316,39 @@
             },
             None => bail!("{cmd:?} terminated by signal"),
         }
-        let btf = Btf::parse_file(tmp_dir.path().join("target.btf"), Endianness::default())
-            .context("Error parsing generated BTF")?;
-        Ok(btf)
+
+        let mut output = Vec::new();
+        stdout.read_to_end(&mut output)?;
+        Btf::parse(output.as_slice(), Endianness::default())
+            .context("failed to parse generated BTF")
     }
 }

-/// Compile an eBPF program and return the path of the compiled object.
-/// Also returns a TempDir handler, dropping it will clear the created dicretory.
-fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> {
-    let tmp_dir = tempfile::tempdir().context("Error making temp dir")?;
-    let source = tmp_dir.path().join("source.c");
-    std::fs::write(&source, source_code).context("Writing bpf program failed")?;
+/// Compile an eBPF program and return its bytes.
+fn compile(source_code: &str) -> Result<ChildStdout> {
+    use std::io::Write as _;
+
     let mut cmd = Command::new("clang");
-    cmd.current_dir(&tmp_dir)
-        .args(["-c", "-g", "-O2", "-target", "bpf"])
-        .arg(&source);
-    let status = cmd
-        .status()
-        .with_context(|| format!("Failed to run {cmd:?}"))?;
+    cmd.args([
+        "-c", "-g", "-O2", "-target", "bpf", "-x", "c", "-", "-o", "-",
+    ])
+    .stdin(Stdio::piped())
+    .stdout(Stdio::piped());
+    let mut child = cmd
+        .spawn()
+        .with_context(|| format!("failed to spawn {cmd:?}"))?;
+    let Child { stdin, stdout, .. } = &mut child;
+    {
+        let mut stdin = stdin.take().ok_or(anyhow!("failed to open stdin"))?;
+        stdin
+            .write_all(source_code.as_bytes())
+            .context("failed to write to stdin")?;
+    }
+    let stdout = stdout.take().ok_or(anyhow!("failed to open stdout"))?;
+    let status = child
+        .wait()
+        .with_context(|| format!("failed to wait for {cmd:?}"))?;
     match status.code() {
         Some(code) => match code {
             0 => {}
@@ -321,7 +356,7 @@ fn compile(source_code: &str) -> Result<(TempDir, PathBuf)> {
         },
         None => bail!("{cmd:?} terminated by signal"),
     }
-    Ok((tmp_dir, source.with_extension("o")))
+    Ok(stdout)
 }

 struct RelocationTestRunner {

@@ -1,10 +1,8 @@
-use aya::include_bytes_aligned;
 use object::{Object, ObjectSymbol};

 #[test]
 fn test_maps() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/map_test");
-    let obj_file = object::File::parse(bytes).unwrap();
+    let obj_file = object::File::parse(crate::MAP_TEST).unwrap();
     if obj_file.section_by_name("maps").is_none() {
         panic!("No 'maps' ELF section");
     }

@@ -1,7 +1,6 @@
 use std::{convert::TryInto as _, thread, time};

 use aya::{
-    include_bytes_aligned,
     maps::Array,
     programs::{
         links::{FdLink, PinnedLink},
@@ -16,8 +15,7 @@ const RETRY_DURATION_MS: u64 = 10;
 #[test]
 fn long_name() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/name_test");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::NAME_TEST).unwrap();
     let name_prog: &mut Xdp = bpf
         .program_mut("ihaveaverylongname")
         .unwrap()
@@ -33,9 +31,7 @@ fn long_name() {
 #[test]
 fn multiple_btf_maps() {
-    let bytes =
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::MULTIMAP_BTF).unwrap();

     let map_1: Array<_, u64> = bpf.take_map("map_1").unwrap().try_into().unwrap();
     let map_2: Array<_, u64> = bpf.take_map("map_2").unwrap().try_into().unwrap();
@@ -71,8 +67,7 @@ macro_rules! assert_loaded {
 #[test]
 fn unload_xdp() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/test");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::TEST).unwrap();
     let prog: &mut Xdp = bpf
         .program_mut("test_unload_xdp")
         .unwrap()
@@ -101,8 +96,7 @@ fn unload_xdp() {
 #[test]
 fn unload_kprobe() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/test");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::TEST).unwrap();
     let prog: &mut KProbe = bpf
         .program_mut("test_unload_kpr")
         .unwrap()
@@ -137,8 +131,7 @@ fn pin_link() {
         return;
     }

-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/test");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::TEST).unwrap();
     let prog: &mut Xdp = bpf
         .program_mut("test_unload_xdp")
         .unwrap()
@@ -173,11 +166,9 @@ fn pin_lifecycle() {
         return;
     }

-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/pass");
-
     // 1. Load Program and Pin
     {
-        let mut bpf = Bpf::load(bytes).unwrap();
+        let mut bpf = Bpf::load(crate::PASS).unwrap();
         let prog: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
         prog.load().unwrap();
         prog.pin("/sys/fs/bpf/aya-xdp-test-prog").unwrap();
@@ -211,7 +202,7 @@ fn pin_lifecycle() {
     // 4. Load a new version of the program, unpin link, and atomically replace old program
     {
-        let mut bpf = Bpf::load(bytes).unwrap();
+        let mut bpf = Bpf::load(crate::PASS).unwrap();
         let prog: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
         prog.load().unwrap();

@@ -1,6 +1,6 @@
 use std::sync::{Arc, LockResult, Mutex, MutexGuard};

-use aya::{include_bytes_aligned, programs::UProbe, Bpf};
+use aya::{programs::UProbe, Bpf};
 use aya_log::BpfLogger;
 use log::{Level, Log, Record};
 use tokio::time::{sleep, Duration};
@@ -89,8 +89,7 @@ impl Log for TestingLogger {
 #[tokio::test]
 async fn log() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/log");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::LOG).unwrap();

     let (logger, captured_logs) = TestingLogger::with_capacity(5);
     BpfLogger::init_with_logger(&mut bpf, logger).unwrap();

@@ -1,13 +1,11 @@
 use core::{mem::size_of, ptr::null_mut, slice::from_raw_parts};
 use std::collections::HashMap;

-use aya::include_bytes_aligned;
 use aya_obj::{generated::bpf_insn, Object, ProgramSection};

 #[test]
 fn run_with_rbpf() {
-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/pass");
-    let object = Object::parse(bytes).unwrap();
+    let object = Object::parse(crate::PASS).unwrap();

     assert_eq!(object.programs.len(), 1);
     matches::assert_matches!(object.programs["pass"].section, ProgramSection::Xdp { .. });
@@ -34,9 +32,7 @@ static mut MULTIMAP_MAPS: [*mut Vec<u64>; 2] = [null_mut(), null_mut()];
 #[test]
 fn use_map_with_rbpf() {
-    let bytes =
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/multimap-btf.bpf.o");
-    let mut object = Object::parse(bytes).unwrap();
+    let mut object = Object::parse(crate::MULTIMAP_BTF).unwrap();

     assert_eq!(object.programs.len(), 1);
     matches::assert_matches!(

@@ -1,13 +1,10 @@
 use std::time::Duration;

-use aya::{include_bytes_aligned, programs::UProbe, Bpf};
+use aya::{programs::UProbe, Bpf};

 #[test]
 fn relocations() {
-    let bpf = load_and_attach(
-        "test_64_32_call_relocs",
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/relocations"),
-    );
+    let bpf = load_and_attach("test_64_32_call_relocs", crate::RELOCATIONS);

     trigger_relocations_program();
     std::thread::sleep(Duration::from_millis(100));
@@ -20,10 +17,7 @@ fn relocations() {
 #[test]
 fn text_64_64_reloc() {
-    let mut bpf = load_and_attach(
-        "test_text_64_64_reloc",
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/text_64_64_reloc.o"),
-    );
+    let mut bpf = load_and_attach("test_text_64_64_reloc", crate::TEXT_64_64_RELOC);

     let mut m = aya::maps::Array::<_, u64>::try_from(bpf.map_mut("RESULTS").unwrap()).unwrap();
     m.set(0, 1, 0).unwrap();

@@ -1,5 +1,4 @@
 use aya::{
-    include_bytes_aligned,
     programs::{Extension, Xdp, XdpFlags},
     util::KernelVersion,
     Bpf, BpfLoader,
@@ -13,8 +12,7 @@ fn xdp() {
         return;
     }

-    let bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/pass");
-    let mut bpf = Bpf::load(bytes).unwrap();
+    let mut bpf = Bpf::load(crate::PASS).unwrap();
     let dispatcher: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
     dispatcher.load().unwrap();
     dispatcher.attach("lo", XdpFlags::default()).unwrap();
@@ -27,15 +25,12 @@ fn extension() {
         eprintln!("skipping test on kernel {kernel_version:?}, XDP uses netlink");
         return;
     }
-    let main_bytes =
-        include_bytes_aligned!("../../../target/bpfel-unknown-none/release/main.bpf.o");
-    let mut bpf = Bpf::load(main_bytes).unwrap();
+    let mut bpf = Bpf::load(crate::MAIN).unwrap();
     let pass: &mut Xdp = bpf.program_mut("pass").unwrap().try_into().unwrap();
     pass.load().unwrap();
     pass.attach("lo", XdpFlags::default()).unwrap();

-    let ext_bytes = include_bytes_aligned!("../../../target/bpfel-unknown-none/release/ext.bpf.o");
-    let mut bpf = BpfLoader::new().extension("drop").load(ext_bytes).unwrap();
+    let mut bpf = BpfLoader::new().extension("drop").load(crate::EXT).unwrap();
     let drop_: &mut Extension = bpf.program_mut("drop").unwrap().try_into().unwrap();
     drop_.load(pass.fd().unwrap(), "xdp_pass").unwrap();
 }

@@ -7,7 +7,6 @@ if [ "$(uname -s)" = "Darwin" ]; then
 fi

 AYA_SOURCE_DIR="$(realpath $(dirname $0)/..)"
-LIBBPF_DIR=$1

 # Temporary directory for tests to use.
 AYA_TMPDIR="${AYA_SOURCE_DIR}/.tmp"
@@ -236,25 +235,16 @@ cleanup_vm() {
    fi
}

-if [ -z "$LIBBPF_DIR" ]; then
-    echo "path to libbpf required"
-    exit 1
-fi
-
start_vm
trap cleanup_vm EXIT

-# make sure we always use fresh aya and libbpf (also see comment at the end)
-exec_vm "rm -rf aya/* libbpf"
+# make sure we always use fresh sources (also see comment at the end)
+exec_vm "rm -rf aya/*"
 rsync_vm "--exclude=target --exclude=.tmp $AYA_SOURCE_DIR"
-rsync_vm "$LIBBPF_DIR"

-# need to build or linting will fail trying to include object files; don't run the tests though.
-exec_vm "cd aya; cargo xtask integration-test --libbpf-dir ~/libbpf -- filter-that-matches-nothing"
-exec_vm "cd aya; cargo clippy --all-targets -p integration-test -- --deny warnings"
-exec_vm "cd aya; cargo xtask integration-test --libbpf-dir ~/libbpf"
+exec_vm "cd aya; cargo xtask integration-test"

# we rm and sync but it doesn't seem to work reliably - I guess we could sleep a
# few seconds after but ain't nobody got time for that. Instead we also rm
# before rsyncing.
-exec_vm "rm -rf aya/* libbpf; sync"
+exec_vm "rm -rf aya/*; sync"

@@ -1,138 +0,0 @@
use std::{
borrow::Cow,
env,
ffi::{OsStr, OsString},
fs,
path::{Path, PathBuf},
process::Command,
};
use anyhow::Result;
use clap::Parser;
use crate::utils::{exec, workspace_root};
#[derive(Debug, Copy, Clone)]
pub enum Architecture {
BpfEl,
BpfEb,
}
impl std::str::FromStr for Architecture {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"bpfel-unknown-none" => Architecture::BpfEl,
"bpfeb-unknown-none" => Architecture::BpfEb,
_ => return Err("invalid target"),
})
}
}
impl std::fmt::Display for Architecture {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(match self {
Architecture::BpfEl => "bpfel-unknown-none",
Architecture::BpfEb => "bpfeb-unknown-none",
})
}
}
#[derive(Debug, Parser)]
pub struct BuildEbpfOptions {
/// Set the endianness of the BPF target
#[clap(default_value = "bpfel-unknown-none", long)]
pub target: Architecture,
/// Libbpf dir, required for compiling C code
#[clap(long, action)]
pub libbpf_dir: PathBuf,
}
pub fn build_ebpf(opts: BuildEbpfOptions) -> Result<()> {
build_rust_ebpf(&opts)?;
build_c_ebpf(&opts)
}
fn build_rust_ebpf(opts: &BuildEbpfOptions) -> Result<()> {
let BuildEbpfOptions {
target,
libbpf_dir: _,
} = opts;
let mut dir = PathBuf::from(workspace_root());
dir.push("test/integration-ebpf");
exec(
Command::new("cargo")
.current_dir(&dir)
.args(["+nightly", "build", "--release", "--target"])
.arg(target.to_string())
.args(["-Z", "build-std=core"])
.current_dir(&dir),
)
}
fn get_libbpf_headers(libbpf_dir: &Path, include_path: &Path) -> Result<()> {
fs::create_dir_all(include_path)?;
let mut includedir = OsString::new();
includedir.push("INCLUDEDIR=");
includedir.push(include_path);
exec(
Command::new("make")
.current_dir(libbpf_dir.join("src"))
.arg(includedir)
.arg("install_headers"),
)
}
fn build_c_ebpf(opts: &BuildEbpfOptions) -> Result<()> {
let BuildEbpfOptions { target, libbpf_dir } = opts;
let mut src = PathBuf::from(workspace_root());
src.push("test/integration-ebpf/src/bpf");
let mut out_path = PathBuf::from(workspace_root());
out_path.push("target");
out_path.push(target.to_string());
out_path.push("release");
let include_path = out_path.join("include");
get_libbpf_headers(libbpf_dir, &include_path)?;
let files = fs::read_dir(&src).unwrap();
for file in files {
let p = file.unwrap().path();
if let Some(ext) = p.extension() {
if ext == "c" {
let mut out = PathBuf::from(&out_path);
out.push(p.file_name().unwrap());
out.set_extension("o");
compile_with_clang(&p, &out, &include_path)?;
}
}
}
Ok(())
}
/// Build eBPF programs with clang and libbpf headers.
fn compile_with_clang(src: &Path, out: &Path, include_path: &Path) -> Result<()> {
let clang: Cow<'_, _> = match env::var_os("CLANG") {
Some(val) => val.into(),
None => OsStr::new("/usr/bin/clang").into(),
};
let arch = match env::consts::ARCH {
"x86_64" => "x86",
"aarch64" => "arm64",
arch => arch,
};
exec(
Command::new(clang)
.arg("-I")
.arg(include_path)
.args(["-g", "-O2", "-target", "bpf", "-c"])
.arg(format!("-D__TARGET_ARCH_{arch}"))
.arg(src)
.arg("-o")
.arg(out),
)
}

@@ -3,20 +3,22 @@ use std::path::PathBuf;
 use aya_tool::{bindgen, write_to_file};

-use crate::codegen::{Architecture, Options};
+use crate::codegen::{Architecture, SysrootOptions};

-pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> {
-    codegen_internal_btf_bindings(opts)?;
+pub fn codegen(opts: &SysrootOptions) -> Result<(), anyhow::Error> {
+    codegen_internal_btf_bindings()?;
     codegen_bindings(opts)
 }

-fn codegen_internal_btf_bindings(opts: &Options) -> Result<(), anyhow::Error> {
+fn codegen_internal_btf_bindings() -> Result<(), anyhow::Error> {
     let dir = PathBuf::from("aya-obj");
     let generated = dir.join("src/generated");
+    let libbpf_dir = PathBuf::from("libbpf");

     let mut bindgen = bindgen::user_builder()
         .clang_arg(format!(
             "-I{}",
-            opts.libbpf_dir
+            libbpf_dir
                 .join("include/uapi")
                 .canonicalize()
                 .unwrap()
@@ -24,17 +26,13 @@ fn codegen_internal_btf_bindings(opts: &Options) -> Result<(), anyhow::Error> {
         ))
         .clang_arg(format!(
             "-I{}",
-            opts.libbpf_dir
+            libbpf_dir
                 .join("include")
                 .canonicalize()
                 .unwrap()
                 .to_string_lossy()
         ))
-        .header(
-            opts.libbpf_dir
-                .join("src/libbpf_internal.h")
-                .to_string_lossy(),
-        )
+        .header(libbpf_dir.join("src/libbpf_internal.h").to_string_lossy())
         .constified_enum_module("bpf_core_relo_kind");

     let types = ["bpf_core_relo", "btf_ext_header"];
@@ -54,7 +52,13 @@ fn codegen_internal_btf_bindings(opts: &Options) -> Result<(), anyhow::Error> {
     Ok(())
 }

-fn codegen_bindings(opts: &Options) -> Result<(), anyhow::Error> {
+fn codegen_bindings(opts: &SysrootOptions) -> Result<(), anyhow::Error> {
+    let SysrootOptions {
+        x86_64_sysroot,
+        aarch64_sysroot,
+        armv7_sysroot,
+        riscv64_sysroot,
+    } = opts;
     let types = [
         // BPF
         "BPF_TYPES",
@@ -158,15 +162,13 @@ fn codegen_bindings(opts: &Options) -> Result<(), anyhow::Error> {
     let dir = PathBuf::from("aya-obj");
     let generated = dir.join("src/generated");
+    let libbpf_dir = PathBuf::from("libbpf");

     let builder = || {
         bindgen::user_builder()
             .header(dir.join("include/linux_wrapper.h").to_string_lossy())
-            .clang_args(&[
-                "-I",
-                &*opts.libbpf_dir.join("include/uapi").to_string_lossy(),
-            ])
-            .clang_args(&["-I", &*opts.libbpf_dir.join("include").to_string_lossy()])
+            .clang_args(&["-I", &*libbpf_dir.join("include/uapi").to_string_lossy()])
+            .clang_args(&["-I", &*libbpf_dir.join("include").to_string_lossy()])
     };

     for arch in Architecture::supported() {
@@ -185,10 +187,10 @@ fn codegen_bindings(opts: &Options) -> Result<(), anyhow::Error> {
         // Set the sysroot. This is needed to ensure that the correct arch
         // specific headers are imported.
         let sysroot = match arch {
-            Architecture::X86_64 => &opts.x86_64_sysroot,
-            Architecture::ARMv7 => &opts.armv7_sysroot,
-            Architecture::AArch64 => &opts.aarch64_sysroot,
-            Architecture::RISCV64 => &opts.riscv64_sysroot,
+            Architecture::X86_64 => x86_64_sysroot,
+            Architecture::ARMv7 => armv7_sysroot,
+            Architecture::AArch64 => aarch64_sysroot,
+            Architecture::RISCV64 => riscv64_sysroot,
         };
         bindgen = bindgen.clang_args(&["-I", &*sysroot.to_string_lossy()]);

@@ -8,11 +8,19 @@ use syn::{parse_str, Item};
 use crate::codegen::{
     helpers::{expand_helpers, extract_helpers},
-    Architecture, Options,
+    Architecture, SysrootOptions,
 };

-pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> {
+pub fn codegen(opts: &SysrootOptions) -> Result<(), anyhow::Error> {
+    let SysrootOptions {
+        x86_64_sysroot,
+        aarch64_sysroot,
+        armv7_sysroot,
+        riscv64_sysroot,
+    } = opts;
     let dir = PathBuf::from("bpf/aya-bpf-bindings");
+    let libbpf_dir = PathBuf::from("libbpf");

     let builder = || {
         let mut bindgen = bindgen::bpf_builder()
@@ -20,12 +28,9 @@ pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> {
             // aya-tool uses aya_bpf::cty. We can't use that here since aya-bpf
             // depends on aya-bpf-bindings so it would create a circular dep.
             .ctypes_prefix("::aya_bpf_cty")
-            .clang_args(&[
-                "-I",
-                &*opts.libbpf_dir.join("include/uapi").to_string_lossy(),
-            ])
-            .clang_args(&["-I", &*opts.libbpf_dir.join("include").to_string_lossy()])
-            .clang_args(&["-I", &*opts.libbpf_dir.join("src").to_string_lossy()])
+            .clang_args(&["-I", &*libbpf_dir.join("include/uapi").to_string_lossy()])
+            .clang_args(&["-I", &*libbpf_dir.join("include").to_string_lossy()])
+            .clang_args(&["-I", &*libbpf_dir.join("src").to_string_lossy()])
             // open aya-bpf-bindings/.../bindings.rs and look for mod
             // _bindgen, those are anonymous enums
             .constified_enum("BPF_F_.*")
@@ -82,10 +87,10 @@ pub fn codegen(opts: &Options) -> Result<(), anyhow::Error> {
         // Set the sysroot. This is needed to ensure that the correct arch
         // specific headers are imported.
         let sysroot = match arch {
-            Architecture::X86_64 => &opts.x86_64_sysroot,
-            Architecture::ARMv7 => &opts.armv7_sysroot,
-            Architecture::AArch64 => &opts.aarch64_sysroot,
-            Architecture::RISCV64 => &opts.riscv64_sysroot,
+            Architecture::X86_64 => x86_64_sysroot,
+            Architecture::ARMv7 => armv7_sysroot,
+            Architecture::AArch64 => aarch64_sysroot,
+            Architecture::RISCV64 => riscv64_sysroot,
         };
         bindgen = bindgen.clang_args(&["-I", &*sysroot.to_string_lossy()]);

@@ -52,13 +52,10 @@ impl std::fmt::Display for Architecture {
     }
 }

-#[derive(Parser)]
-pub struct Options {
-    #[arg(long, action)]
-    libbpf_dir: PathBuf,
 // sysroot options. Default to ubuntu headers installed by the
 // libc6-dev-{arm64,armel}-cross packages.
+#[derive(Parser)]
+pub struct SysrootOptions {
     #[arg(long, default_value = "/usr/include/x86_64-linux-gnu", action)]
     x86_64_sysroot: PathBuf,
@@ -70,6 +67,12 @@ pub struct Options {
     #[arg(long, default_value = "/usr/riscv64-linux-gnu/include", action)]
     riscv64_sysroot: PathBuf,
+}
+
+#[derive(Parser)]
+pub struct Options {
+    #[command(flatten)]
+    sysroot_options: SysrootOptions,

     #[command(subcommand)]
     command: Option<Command>,
@@ -84,13 +87,18 @@ enum Command {
 }

 pub fn codegen(opts: Options) -> Result<(), anyhow::Error> {
-    use Command::*;
-    match opts.command {
-        Some(Aya) => aya::codegen(&opts),
-        Some(AyaBpfBindings) => aya_bpf_bindings::codegen(&opts),
+    let Options {
+        sysroot_options,
+        command,
+    } = opts;
+    match command {
+        Some(command) => match command {
+            Command::Aya => aya::codegen(&sysroot_options),
+            Command::AyaBpfBindings => aya_bpf_bindings::codegen(&sysroot_options),
+        },
         None => {
-            aya::codegen(&opts)?;
-            aya_bpf_bindings::codegen(&opts)
+            aya::codegen(&sysroot_options)?;
+            aya_bpf_bindings::codegen(&sysroot_options)
         }
     }
 }

@@ -1,5 +1,4 @@
-use crate::utils::exec;
-use anyhow::{Context as _, Result};
+use anyhow::{anyhow, Context as _, Result};
 use std::{
     path::{Path, PathBuf},
     process::Command,
@@ -9,6 +8,19 @@ use std::{fs, io, io::Write};

 use indoc::indoc;

+pub fn exec(cmd: &mut Command) -> Result<()> {
+    let status = cmd
+        .status()
+        .with_context(|| format!("failed to run {cmd:?}"))?;
+    match status.code() {
+        Some(code) => match code {
+            0 => Ok(()),
+            code => Err(anyhow!("{cmd:?} exited with code {code}")),
+        },
+        None => Err(anyhow!("{cmd:?} terminated by signal")),
+    }
+}
+
 pub fn docs() -> Result<()> {
     let current_dir = PathBuf::from(".");
     let header_path = current_dir.join("header.html");

@@ -1,12 +1,9 @@
-mod build_ebpf;
 mod codegen;
 mod docs;
 mod run;
-pub(crate) mod utils;
-
-use std::process::exit;

 use clap::Parser;

 #[derive(Parser)]
 pub struct XtaskOptions {
     #[clap(subcommand)]
@@ -17,20 +14,27 @@ pub struct XtaskOptions {
 enum Command {
     Codegen(codegen::Options),
     Docs,
+    BuildIntegrationTest(run::BuildOptions),
     IntegrationTest(run::Options),
 }

-fn main() {
+fn main() -> anyhow::Result<()> {
     let XtaskOptions { command } = Parser::parse();

-    let ret = match command {
+    match command {
         Command::Codegen(opts) => codegen::codegen(opts),
         Command::Docs => docs::docs(),
-        Command::IntegrationTest(opts) => run::run(opts),
-    };
-
-    if let Err(e) = ret {
-        eprintln!("{e:#}");
-        exit(1);
+        Command::BuildIntegrationTest(opts) => {
+            let binaries = run::build(opts)?;
+            let mut stdout = std::io::stdout();
+            for (_name, binary) in binaries {
+                use std::{io::Write as _, os::unix::ffi::OsStrExt as _};
+                stdout.write_all(binary.as_os_str().as_bytes())?;
+                stdout.write_all("\n".as_bytes())?;
+            }
+            Ok(())
+        }
+        Command::IntegrationTest(opts) => run::run(opts),
     }
 }

@@ -9,31 +9,33 @@ use anyhow::{Context as _, Result};
 use cargo_metadata::{Artifact, CompilerMessage, Message, Target};
 use clap::Parser;

-use crate::build_ebpf::{build_ebpf, Architecture, BuildEbpfOptions as BuildOptions};
-
 #[derive(Debug, Parser)]
-pub struct Options {
-    /// Set the endianness of the BPF target
-    #[clap(default_value = "bpfel-unknown-none", long)]
-    pub bpf_target: Architecture,
-    /// Build and run the release target
+pub struct BuildOptions {
+    /// Pass --release to `cargo build`.
     #[clap(long)]
     pub release: bool,
-    /// The command used to wrap your application
+    /// Pass --target to `cargo build`.
+    #[clap(long)]
+    pub target: Option<String>,
+}
+
+#[derive(Debug, Parser)]
+pub struct Options {
+    #[command(flatten)]
+    pub build_options: BuildOptions,
+    /// The command used to wrap your application.
     #[clap(short, long, default_value = "sudo -E")]
     pub runner: String,
-    /// libbpf directory
-    #[clap(long, action)]
-    pub libbpf_dir: PathBuf,
-    /// Arguments to pass to your application
+    /// Arguments to pass to your application.
     #[clap(name = "args", last = true)]
     pub run_args: Vec<String>,
 }

 /// Build the project
-fn build(release: bool) -> Result<Vec<(PathBuf, PathBuf)>> {
+pub fn build(opts: BuildOptions) -> Result<Vec<(String, PathBuf)>> {
+    let BuildOptions { release, target } = opts;
     let mut cmd = Command::new("cargo");
-    cmd.args([
+    cmd.env("AYA_BUILD_INTEGRATION_BPF", "true").args([
         "build",
         "--tests",
         "--message-format=json",
@@ -42,6 +44,9 @@ fn build(release: bool) -> Result<Vec<(PathBuf, PathBuf)>> {
     if release {
         cmd.arg("--release");
     }
+    if let Some(target) = target {
+        cmd.args(["--target", &target]);
+    }
     let mut cmd = cmd
         .stdout(Stdio::piped())
         .spawn()
@@ -55,16 +60,17 @@ fn build(release: bool) -> Result<Vec<(PathBuf, PathBuf)>> {
         match message.context("valid JSON")? {
             Message::CompilerArtifact(Artifact {
                 executable,
-                target: Target { src_path, .. },
+                target: Target { name, .. },
                 ..
             }) => {
                 if let Some(executable) = executable {
-                    executables.push((src_path.into(), executable.into()));
+                    executables.push((name, executable.into()));
                 }
             }
             Message::CompilerMessage(CompilerMessage { message, .. }) => {
-                assert_eq!(writeln!(&mut compiler_messages, "{message}"), Ok(()));
+                writeln!(&mut compiler_messages, "{message}").context("String write failed")?
             }
             _ => {}
         }
     }
@@ -87,27 +93,18 @@ fn build(release: bool) -> Result<Vec<(PathBuf, PathBuf)>> {
 /// Build and run the project
 pub fn run(opts: Options) -> Result<()> {
     let Options {
-        bpf_target,
-        release,
+        build_options,
         runner,
-        libbpf_dir,
         run_args,
     } = opts;

-    // build our ebpf program followed by our application
-    build_ebpf(BuildOptions {
-        target: bpf_target,
-        libbpf_dir,
-    })
-    .context("error while building eBPF program")?;
-
-    let binaries = build(release).context("error while building userspace application")?;
+    let binaries = build(build_options).context("error while building userspace application")?;
     let mut args = runner.trim().split_terminator(' ');
     let runner = args.next().ok_or(anyhow::anyhow!("no first argument"))?;
     let args = args.collect::<Vec<_>>();

     let mut failures = String::new();
-    for (src_path, binary) in binaries {
+    for (name, binary) in binaries {
         let mut cmd = Command::new(runner);
         let cmd = cmd
             .args(args.iter())
@@ -115,7 +112,7 @@ pub fn run(opts: Options) -> Result<()> {
             .args(run_args.iter())
             .arg("--test-threads=1");

-        println!("{} running {cmd:?}", src_path.display());
+        println!("{} running {cmd:?}", name);

         let status = cmd
             .status()
@@ -123,19 +120,11 @@ pub fn run(opts: Options) -> Result<()> {
         match status.code() {
             Some(code) => match code {
                 0 => {}
-                code => assert_eq!(
-                    writeln!(
-                        &mut failures,
-                        "{} exited with status code {code}",
-                        src_path.display()
-                    ),
-                    Ok(())
-                ),
+                code => writeln!(&mut failures, "{} exited with status code {code}", name)
+                    .context("String write failed")?,
             },
-            None => assert_eq!(
-                writeln!(&mut failures, "{} terminated by signal", src_path.display()),
-                Ok(())
-            ),
+            None => writeln!(&mut failures, "{} terminated by signal", name)
+                .context("String write failed")?,
         }
     }
     if failures.is_empty() {

@@ -1,24 +0,0 @@
use std::{cell::OnceCell, process::Command};
use anyhow::{bail, Context as _, Result};
pub fn workspace_root() -> &'static str {
static mut WORKSPACE_ROOT: OnceCell<String> = OnceCell::new();
unsafe { &mut WORKSPACE_ROOT }.get_or_init(|| {
let cmd = cargo_metadata::MetadataCommand::new();
cmd.exec().unwrap().workspace_root.to_string()
})
}
pub fn exec(cmd: &mut Command) -> Result<()> {
let status = cmd
.status()
.with_context(|| format!("failed to run {cmd:?}"))?;
match status.code() {
Some(code) => match code {
0 => Ok(()),
code => bail!("{cmd:?} exited with code {code}"),
},
None => bail!("{cmd:?} terminated by signal"),
}
}