Merge pull request #706 from aya-rs/reloc-tests

integration-test: Remove runtime toolchain deps
reviewable/pr712/r1
Tamir Duberstein 1 year ago committed by GitHub
commit 3692e53ff0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -43,6 +43,7 @@ jobs:
- name: Run miri - name: Run miri
run: | run: |
set -euxo pipefail
cargo hack miri test --all-targets --feature-powerset \ cargo hack miri test --all-targets --feature-powerset \
--exclude aya-bpf \ --exclude aya-bpf \
--exclude aya-bpf-bindings \ --exclude aya-bpf-bindings \
@ -79,6 +80,7 @@ jobs:
- name: Build - name: Build
run: | run: |
set -euxo pipefail
cargo hack build --all-targets --feature-powerset \ cargo hack build --all-targets --feature-powerset \
--exclude aya-bpf \ --exclude aya-bpf \
--exclude aya-bpf-bindings \ --exclude aya-bpf-bindings \
@ -90,6 +92,7 @@ jobs:
env: env:
RUST_BACKTRACE: full RUST_BACKTRACE: full
run: | run: |
set -euxo pipefail
cargo hack test --all-targets --feature-powerset \ cargo hack test --all-targets --feature-powerset \
--exclude aya-bpf \ --exclude aya-bpf \
--exclude aya-bpf-bindings \ --exclude aya-bpf-bindings \
@ -122,7 +125,7 @@ jobs:
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
- name: Prereqs - name: bpf-linker
run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
- uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-hack
@ -130,26 +133,71 @@ jobs:
env: env:
CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }} CARGO_CFG_BPF_TARGET_ARCH: ${{ matrix.arch }}
run: | run: |
set -euxo pipefail
cargo hack build --package aya-bpf --package aya-log-ebpf \ cargo hack build --package aya-bpf --package aya-log-ebpf \
--feature-powerset \ --feature-powerset \
--target ${{ matrix.target }} \ --target ${{ matrix.target }} \
-Z build-std=core -Z build-std=core
integration-test: build-integration-test:
runs-on: macos-latest runs-on: ubuntu-22.04
strategy:
fail-fast: false
matrix:
# See https://doc.rust-lang.org/cargo/reference/profiles.html for the names
# of the builtin profiles. Note that dev builds "debug" targets.
profile:
- release
- dev
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v3
with: with:
submodules: recursive submodules: recursive
- uses: dtolnay/rust-toolchain@master
with:
toolchain: nightly
components: rust-src
- uses: Swatinem/rust-cache@v2
- name: bpf-linker
run: cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
- name: Install dependencies
# ubuntu-22.04 comes with clang 14[0] which doesn't include support for signed and 64bit
# enum values which was added in clang 15[1].
#
# gcc-multilib provides at least <asm/types.h> which is referenced by libbpf.
#
# llvm provides llvm-objcopy which is used to build the BTF relocation tests.
#
# [0] https://github.com/actions/runner-images/blob/ubuntu22/20230724.1/images/linux/Ubuntu2204-Readme.md
#
# [1] https://github.com/llvm/llvm-project/commit/dc1c43d
run: |
set -euxo pipefail
wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
echo deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy main | sudo tee /etc/apt/sources.list.d/llvm.list
sudo apt-get update
sudo apt-get -y install clang gcc-multilib llvm
- name: Build
run: |
set -euxo pipefail
mkdir -p integration-test-binaries
# See https://doc.rust-lang.org/cargo/reference/profiles.html for the
# names of the builtin profiles. Note that dev builds "debug" targets.
cargo xtask build-integration-test --cargo-arg=--profile=dev | xargs -I % cp % integration-test-binaries/dev
cargo xtask build-integration-test --cargo-arg=--profile=release | xargs -I % cp % integration-test-binaries/release
- uses: actions/upload-artifact@v3
with:
name: integration-test-binaries
path: integration-test-binaries
run-integration-test:
runs-on: macos-latest
needs: ["build-integration-test"]
steps:
- uses: actions/checkout@v3
with:
sparse-checkout: |
test/run.sh
test/cloud-localds
- name: Install Pre-requisites - name: Install Pre-requisites
run: | run: |
brew install qemu gnu-getopt coreutils cdrtools brew install qemu gnu-getopt coreutils cdrtools
@ -161,20 +209,25 @@ jobs:
.tmp/*.qcow2 .tmp/*.qcow2
.tmp/test_rsa .tmp/test_rsa
.tmp/test_rsa.pub .tmp/test_rsa.pub
# FIXME: we should invalidate the cache on new bpf-linker releases.
# For now we must manually delete the cache when we release a new
# bpf-linker version.
key: tmp-files-${{ hashFiles('test/run.sh') }} key: tmp-files-${{ hashFiles('test/run.sh') }}
- uses: actions/download-artifact@v3
with:
name: integration-test-binaries
path: integration-test-binaries
- name: Run integration tests - name: Run integration tests
run: test/run.sh --cargo-arg=--profile=${{ matrix.profile }} run: |
set -euxo pipefail
find integration-test-binaries -type f -exec chmod +x {} \;
test/run.sh integration-test-binaries
# Provides a single status check for the entire build workflow. # Provides a single status check for the entire build workflow.
# This is used for merge automation, like Mergify, since GH actions # This is used for merge automation, like Mergify, since GH actions
# has no concept of "when all status checks pass". # has no concept of "when all status checks pass".
# https://docs.mergify.com/conditions/#validating-all-status-checks # https://docs.mergify.com/conditions/#validating-all-status-checks
build-workflow-complete: build-workflow-complete:
needs: ["lint", "build-test-aya", "build-test-aya-bpf", "integration-test"] needs: ["lint", "build-test-aya", "build-test-aya-bpf", "run-integration-test"]
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Build Complete - name: Build Complete

@ -75,8 +75,8 @@ netns-rs = { version = "0.1", default-features = false }
num_enum = { version = "0.6", default-features = false } num_enum = { version = "0.6", default-features = false }
object = { version = "0.31", default-features = false } object = { version = "0.31", default-features = false }
parking_lot = { version = "0.12.0", default-features = false } parking_lot = { version = "0.12.0", default-features = false }
proc-macro2 = { version = "1", default-features = false }
proc-macro-error = { version = "1.0", default-features = false } proc-macro-error = { version = "1.0", default-features = false }
proc-macro2 = { version = "1", default-features = false }
public-api = { version = "0.31.2", default-features = false } public-api = { version = "0.31.2", default-features = false }
quote = { version = "1", default-features = false } quote = { version = "1", default-features = false }
rbpf = { version = "0.2.0", default-features = false } rbpf = { version = "0.2.0", default-features = false }
@ -85,6 +85,7 @@ rustup-toolchain = { version = "0.1.5", default-features = false }
rustversion = { version = "1.0.0", default-features = false } rustversion = { version = "1.0.0", default-features = false }
syn = { version = "2", default-features = false } syn = { version = "2", default-features = false }
tempfile = { version = "3", default-features = false } tempfile = { version = "3", default-features = false }
test-case = { version = "3.1.0", default-features = false }
testing_logger = { version = "0.1.1", default-features = false } testing_logger = { version = "0.1.1", default-features = false }
thiserror = { version = "1", default-features = false } thiserror = { version = "1", default-features = false }
tokio = { version = "1.24.0", default-features = false } tokio = { version = "1.24.0", default-features = false }

@ -403,16 +403,16 @@ mod tests {
let id1 = links.insert(l1).unwrap(); let id1 = links.insert(l1).unwrap();
let id2 = links.insert(l2).unwrap(); let id2 = links.insert(l2).unwrap();
assert!(*l1_detached.borrow() == 0); assert_eq!(*l1_detached.borrow(), 0);
assert!(*l2_detached.borrow() == 0); assert_eq!(*l2_detached.borrow(), 0);
assert!(links.remove(id1).is_ok()); assert!(links.remove(id1).is_ok());
assert!(*l1_detached.borrow() == 1); assert_eq!(*l1_detached.borrow(), 1);
assert!(*l2_detached.borrow() == 0); assert_eq!(*l2_detached.borrow(), 0);
assert!(links.remove(id2).is_ok()); assert!(links.remove(id2).is_ok());
assert!(*l1_detached.borrow() == 1); assert_eq!(*l1_detached.borrow(), 1);
assert!(*l2_detached.borrow() == 1); assert_eq!(*l2_detached.borrow(), 1);
} }
#[test] #[test]
@ -451,12 +451,12 @@ mod tests {
links.insert(l2).unwrap(); links.insert(l2).unwrap();
// manually remove one link // manually remove one link
assert!(links.remove(id1).is_ok()); assert!(links.remove(id1).is_ok());
assert!(*l1_detached.borrow() == 1); assert_eq!(*l1_detached.borrow(), 1);
assert!(*l2_detached.borrow() == 0); assert_eq!(*l2_detached.borrow(), 0);
} }
// remove the other on drop // remove the other on drop
assert!(*l1_detached.borrow() == 1); assert_eq!(*l1_detached.borrow(), 1);
assert!(*l2_detached.borrow() == 1); assert_eq!(*l2_detached.borrow(), 1);
} }
#[test] #[test]
@ -472,19 +472,19 @@ mod tests {
links.insert(l2).unwrap(); links.insert(l2).unwrap();
// manually forget one link // manually forget one link
let owned_l1 = links.forget(id1); let owned_l1 = links.forget(id1);
assert!(*l1_detached.borrow() == 0); assert_eq!(*l1_detached.borrow(), 0);
assert!(*l2_detached.borrow() == 0); assert_eq!(*l2_detached.borrow(), 0);
owned_l1.unwrap() owned_l1.unwrap()
}; };
// l2 is detached on `Drop`, but l1 is still alive // l2 is detached on `Drop`, but l1 is still alive
assert!(*l1_detached.borrow() == 0); assert_eq!(*l1_detached.borrow(), 0);
assert!(*l2_detached.borrow() == 1); assert_eq!(*l2_detached.borrow(), 1);
// manually detach l1 // manually detach l1
assert!(owned_l1.detach().is_ok()); assert!(owned_l1.detach().is_ok());
assert!(*l1_detached.borrow() == 1); assert_eq!(*l1_detached.borrow(), 1);
assert!(*l2_detached.borrow() == 1); assert_eq!(*l2_detached.borrow(), 1);
} }
#[test] #[test]

@ -33,7 +33,9 @@ cargo xtask integration-test
### Virtualized ### Virtualized
``` ```
./test/run.sh mkdir -p integration-test-binaries
cargo xtask build-integration-test | xargs -I % cp % integration-test-binaries
./test/run.sh integration-test-binaries
``` ```
### Writing an integration test ### Writing an integration test

@ -15,6 +15,7 @@ log = { workspace = true }
netns-rs = { workspace = true } netns-rs = { workspace = true }
object = { workspace = true } object = { workspace = true }
rbpf = { workspace = true } rbpf = { workspace = true }
test-case = { workspace = true }
tokio = { workspace = true, default-features = false, features = [ tokio = { workspace = true, default-features = false, features = [
"macros", "macros",
"time", "time",

@ -0,0 +1,106 @@
// clang-format off
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
// clang-format on
// eBPF-side fixture for the BTF relocation integration tests. Each uprobe
// program records a single observed value via set_output(); the test harness
// reads it back and compares it against expectations with and without
// target-BTF relocations applied.
// License section read by the kernel when the programs are loaded.
char _license[] __attribute__((section("license"), used)) = "GPL";
// One-slot array map used to hand the observed value back to userspace.
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__type(key, __u32);
__type(value, __u64);
__uint(max_entries, 1);
} output_map SEC(".maps");
// Stores `value` at index 0 of output_map; returns the helper's status code.
long set_output(__u64 value) {
__u32 key = 0;
return bpf_map_update_elem(&output_map, &key, &value, BPF_ANY);
}
// Local definition of the struct; the target BTF fixture (reloc.btf.c)
// declares the same type name with shifted fields, so the CO-RE field
// relocation must adjust the access offset.
struct relocated_struct_with_scalars {
__u8 a;
__u8 b;
__u8 c;
};
// noinline keeps each relocation in its own function, which makes the emitted
// bytecode easier to inspect and correlate with a single test case.
__attribute__((noinline)) int field_global() {
struct relocated_struct_with_scalars s = {1, 2, 3};
return set_output(__builtin_preserve_access_index(s.b));
}
SEC("uprobe/field") int field(void *ctx) {
return field_global();
}
// Pointer-sized members; the target BTF fixture swaps `first` and `second`.
struct relocated_struct_with_pointer {
struct relocated_struct_with_pointer *first;
struct relocated_struct_with_pointer *second;
};
__attribute__((noinline)) int pointer_global() {
struct relocated_struct_with_pointer s = {
(struct relocated_struct_with_pointer *)42,
(struct relocated_struct_with_pointer *)21,
};
return set_output((__u64)__builtin_preserve_access_index(s.first));
}
SEC("uprobe/pointer") int pointer(void *ctx) {
return pointer_global();
}
// Exercises bpf_core_field_exists(): reports `a` when the target type still
// has that field, otherwise falls back to `b`. (The target BTF fixture's
// version of the struct has no `a` field.)
__attribute__((noinline)) int struct_flavors_global() {
struct relocated_struct_with_scalars s = {1, 2, 3};
if (bpf_core_field_exists(s.a)) {
return set_output(__builtin_preserve_access_index(s.a));
} else {
return set_output(__builtin_preserve_access_index(s.b));
}
}
SEC("uprobe/struct_flavors") int struct_flavors(void *ctx) {
return struct_flavors_global();
}
// Enum value relocations. The constants have no special meaning; they are
// "nice" bit patterns that are easy to spot while debugging. The target BTF
// fixture assigns different values to the same enumerator names.
enum relocated_enum_unsigned_32 { U32 = 0xAAAAAAAA };
__attribute__((noinline)) int enum_unsigned_32_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_unsigned_32, U32));
}
SEC("uprobe/enum_unsigned_32")
int enum_unsigned_32(void *ctx) {
return enum_unsigned_32_global();
}
// Signed 32-bit enum; the harness skips this case on kernels older than 6.0
// (see the required_kernel_version handling in the test file).
enum relocated_enum_signed_32 { S32 = -0x7AAAAAAA };
__attribute__((noinline)) int enum_signed_32_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_signed_32, S32));
}
SEC("uprobe/enum_signed_32") int enum_signed_32(void *ctx) {
return enum_signed_32_global();
}
// 64-bit enum values; likewise gated on kernel >= 6.0 by the harness.
enum relocated_enum_unsigned_64 { U64 = 0xAAAAAAAABBBBBBBB };
__attribute__((noinline)) int enum_unsigned_64_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_unsigned_64, U64));
}
SEC("uprobe/enum_unsigned_64")
int enum_unsigned_64(void *ctx) {
return enum_unsigned_64_global();
}
enum relocated_enum_signed_64 { u64 = -0xAAAAAAABBBBBBBB };
__attribute__((noinline)) int enum_signed_64_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_signed_64, u64));
}
SEC("uprobe/enum_signed_64") int enum_signed_64(void *ctx) {
return enum_signed_64_global();
}

@ -0,0 +1,77 @@
// clang-format off
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
// clang-format on
#include <stdlib.h>
// Userspace counterpart of reloc.bpf.c, compiled only so its .BTF section can
// be extracted (see the c_btf handling in build.rs) and fed to the loader as
// "target" BTF. It reuses the fixture's type names but with shifted fields
// and different enum values, which is what forces the relocations to fire.
// This set_output never runs as eBPF; exit() just keeps the signature usable
// from the copied function bodies below.
long set_output(__u64 value) { exit((int)value); }
// Same type name as in reloc.bpf.c, but the scalar fields are renamed and
// shifted: `b` sits at a different offset here.
struct relocated_struct_with_scalars {
__u8 b;
__u8 c;
__u8 d;
};
__attribute__((noinline)) int field_global() {
struct relocated_struct_with_scalars s = {1, 2, 3};
return set_output(__builtin_preserve_access_index(s.b));
}
// Pointer members swapped relative to the eBPF fixture.
struct relocated_struct_with_pointer {
struct relocated_struct_with_pointer *second;
struct relocated_struct_with_pointer *first;
};
__attribute__((noinline)) int pointer_global() {
struct relocated_struct_with_pointer s = {
(struct relocated_struct_with_pointer *)42,
(struct relocated_struct_with_pointer *)21,
};
return set_output((__u64)__builtin_preserve_access_index(s.first));
}
__attribute__((noinline)) int struct_flavors_global() {
struct relocated_struct_with_scalars s = {1, 2, 3};
if (bpf_core_field_exists(s.b)) {
return set_output(__builtin_preserve_access_index(s.b));
} else {
return set_output(__builtin_preserve_access_index(s.c));
}
}
// Same enumerator names as the eBPF fixture, different values; programs
// relocated against this BTF should observe these values.
enum relocated_enum_unsigned_32 { U32 = 0xBBBBBBBB };
__attribute__((noinline)) int enum_unsigned_32_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_unsigned_32, U32));
}
enum relocated_enum_signed_32 { S32 = -0x7BBBBBBB };
__attribute__((noinline)) int enum_signed_32_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_signed_32, S32));
}
enum relocated_enum_unsigned_64 { U64 = 0xCCCCCCCCDDDDDDDD };
__attribute__((noinline)) int enum_unsigned_64_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_unsigned_64, U64));
}
enum relocated_enum_signed_64 { u64 = -0xCCCCCCCDDDDDDDD };
__attribute__((noinline)) int enum_signed_64_global() {
return set_output(bpf_core_enum_value(enum relocated_enum_signed_64, u64));
}
// Avoids dead code elimination by the compiler.
int main() {
field_global();
pointer_global();
struct_flavors_global();
enum_unsigned_32_global();
enum_signed_32_global();
enum_unsigned_64_global();
enum_signed_64_global();
}

@ -64,16 +64,19 @@ fn main() {
panic!("unsupported endian={:?}", endian) panic!("unsupported endian={:?}", endian)
}; };
const C_BPF_PROBES: &[(&str, &str)] = &[ const C_BPF: &[(&str, &str)] = &[
("ext.bpf.c", "ext.bpf.o"), ("ext.bpf.c", "ext.bpf.o"),
("main.bpf.c", "main.bpf.o"), ("main.bpf.c", "main.bpf.o"),
("multimap-btf.bpf.c", "multimap-btf.bpf.o"), ("multimap-btf.bpf.c", "multimap-btf.bpf.o"),
("reloc.bpf.c", "reloc.bpf.o"),
("text_64_64_reloc.c", "text_64_64_reloc.o"), ("text_64_64_reloc.c", "text_64_64_reloc.o"),
]; ];
let c_bpf_probes = C_BPF_PROBES let c_bpf = C_BPF.iter().map(|(src, dst)| (src, out_dir.join(dst)));
.iter()
.map(|(src, dst)| (src, out_dir.join(dst))); const C_BTF: &[(&str, &str)] = &[("reloc.btf.c", "reloc.btf.o")];
let c_btf = C_BTF.iter().map(|(src, dst)| (src, out_dir.join(dst)));
if build_integration_bpf { if build_integration_bpf {
let libbpf_dir = manifest_dir let libbpf_dir = manifest_dir
@ -113,7 +116,7 @@ fn main() {
target_arch.push(arch); target_arch.push(arch);
}; };
for (src, dst) in c_bpf_probes { for (src, dst) in c_bpf {
let src = bpf_dir.join(src); let src = bpf_dir.join(src);
println!("cargo:rerun-if-changed={}", src.to_str().unwrap()); println!("cargo:rerun-if-changed={}", src.to_str().unwrap());
@ -130,6 +133,51 @@ fn main() {
.unwrap(); .unwrap();
} }
// Build each BTF-only fixture: compile it with clang, then extract the raw
// .BTF section into `dst`. The object file itself is discarded; only the BTF
// blob is kept for the relocation tests.
for (src, dst) in c_btf {
let src = bpf_dir.join(src);
println!("cargo:rerun-if-changed={}", src.to_str().unwrap());
let mut cmd = Command::new("clang");
cmd.arg("-I")
.arg(&libbpf_headers_dir)
.args(["-g", "-target", target, "-c"])
.arg(&target_arch)
.arg(src)
// Emit the object on stdout so it can be piped straight into llvm-objcopy.
.args(["-o", "-"]);
let mut child = cmd
.stdout(Stdio::piped())
.spawn()
.unwrap_or_else(|err| panic!("failed to spawn {cmd:?}: {err}"));
let Child { stdout, .. } = &mut child;
let stdout = stdout.take().unwrap();
// `--dump-section .BTF=<dst>` writes the raw section contents to <dst>.
let mut output = OsString::new();
output.push(".BTF=");
output.push(dst);
exec(
// NB: objcopy doesn't support reading from stdin, so we have to use llvm-objcopy.
Command::new("llvm-objcopy")
.arg("--dump-section")
.arg(output)
.arg("-")
.stdin(stdout),
)
.unwrap();
// Wait for clang separately and surface non-zero exit / signal termination
// explicitly (the pipe above only covers the objcopy side).
let status = child
.wait()
.unwrap_or_else(|err| panic!("failed to wait for {cmd:?}: {err}"));
match status.code() {
Some(code) => match code {
0 => {}
code => panic!("{cmd:?} exited with status code {code}"),
},
None => panic!("{cmd:?} terminated by signal"),
}
}
let target = format!("{target}-unknown-none"); let target = format!("{target}-unknown-none");
let Package { manifest_path, .. } = integration_ebpf_package; let Package { manifest_path, .. } = integration_ebpf_package;
@ -225,7 +273,7 @@ fn main() {
.unwrap_or_else(|err| panic!("failed to copy {binary:?} to {dst:?}: {err}")); .unwrap_or_else(|err| panic!("failed to copy {binary:?} to {dst:?}: {err}"));
} }
} else { } else {
for (_src, dst) in c_bpf_probes { for (_src, dst) in c_bpf.chain(c_btf) {
fs::write(&dst, []).unwrap_or_else(|err| panic!("failed to create {dst:?}: {err}")); fs::write(&dst, []).unwrap_or_else(|err| panic!("failed to create {dst:?}: {err}"));
} }

@ -4,6 +4,8 @@ pub const EXT: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/ext.bpf
pub const MAIN: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/main.bpf.o")); pub const MAIN: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/main.bpf.o"));
pub const MULTIMAP_BTF: &[u8] = pub const MULTIMAP_BTF: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/multimap-btf.bpf.o")); include_bytes_aligned!(concat!(env!("OUT_DIR"), "/multimap-btf.bpf.o"));
pub const RELOC_BPF: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/reloc.bpf.o"));
pub const RELOC_BTF: &[u8] = include_bytes_aligned!(concat!(env!("OUT_DIR"), "/reloc.btf.o"));
pub const TEXT_64_64_RELOC: &[u8] = pub const TEXT_64_64_RELOC: &[u8] =
include_bytes_aligned!(concat!(env!("OUT_DIR"), "/text_64_64_reloc.o")); include_bytes_aligned!(concat!(env!("OUT_DIR"), "/text_64_64_reloc.o"));

@ -93,10 +93,11 @@ fn set_kernel_buffer_element(bpf: &mut Bpf, bytes: &[u8]) {
m.set(0, bytes, 0).unwrap(); m.set(0, bytes, 0).unwrap();
} }
#[track_caller]
fn result_bytes(bpf: &Bpf) -> Vec<u8> { fn result_bytes(bpf: &Bpf) -> Vec<u8> {
let m = Array::<_, TestResult>::try_from(bpf.map("RESULT").unwrap()).unwrap(); let m = Array::<_, TestResult>::try_from(bpf.map("RESULT").unwrap()).unwrap();
let result = m.get(&0, 0).unwrap(); let result = m.get(&0, 0).unwrap();
assert!(result.did_error == 0); assert_eq!(result.did_error, 0);
// assert that the buffer is always null terminated // assert that the buffer is always null terminated
assert_eq!(result.buf[result.len], 0); assert_eq!(result.buf[result.len], 0);
result.buf[..result.len].to_vec() result.buf[..result.len].to_vec()

@ -1,405 +1,62 @@
use anyhow::{anyhow, bail, Context as _, Result}; use test_case::test_case;
use std::{
process::{Child, ChildStdout, Command, Stdio}, use aya::{maps::Array, programs::UProbe, util::KernelVersion, BpfLoader, Btf, Endianness};
thread::sleep,
time::Duration, #[test_case("field", false, None, 2)]
}; #[test_case("field", true, None, 1)]
#[test_case("enum_unsigned_32", false, None, 0xAAAAAAAA)]
use aya::{maps::Array, programs::TracePoint, util::KernelVersion, BpfLoader, Btf, Endianness}; #[test_case("enum_unsigned_32", true, None, 0xBBBBBBBB)]
#[test_case("pointer", false, None, 42)]
// In the tests below we often use values like 0xAAAAAAAA or -0x7AAAAAAA. Those values have no #[test_case("pointer", true, None, 21)]
// special meaning, they just have "nice" bit patterns that can be helpful while debugging. #[test_case("struct_flavors", false, None, 1)]
#[test_case("struct_flavors", true, None, 1)]
#[test] #[test_case("enum_signed_32", false, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), -0x7AAAAAAAi32 as u64)]
fn relocate_field() { #[test_case("enum_signed_32", true, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), -0x7BBBBBBBi32 as u64)]
let test = RelocationTest { #[test_case("enum_unsigned_64", false, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), 0xAAAAAAAABBBBBBBB)]
local_definition: r#" #[test_case("enum_unsigned_64", true, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), 0xCCCCCCCCDDDDDDDD)]
struct foo { #[test_case("enum_signed_64", false, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), -0xAAAAAAABBBBBBBBi64 as u64)]
__u8 a; #[test_case("enum_signed_64", true, Some((KernelVersion::new(6, 0, 0), "https://github.com/torvalds/linux/commit/6089fb3")), -0xCCCCCCCDDDDDDDDi64 as u64)]
__u8 b; fn relocation_tests(
__u8 c; program: &str,
__u8 d; with_relocations: bool,
}; required_kernel_version: Option<(KernelVersion, &str)>,
"#, expected: u64,
target_btf: r#" ) {
struct foo { if let Some((required_kernel_version, commit)) = required_kernel_version {
__u8 a; let current_kernel_version = KernelVersion::current().unwrap();
__u8 c; if current_kernel_version < required_kernel_version {
__u8 b; eprintln!("skipping test on kernel {current_kernel_version:?}, support for {program} was added in {required_kernel_version:?}; see {commit}");
__u8 d;
} s1;
"#,
relocation_code: r#"
__u8 memory[] = {1, 2, 3, 4};
struct foo *ptr = (struct foo *) &memory;
value = __builtin_preserve_access_index(ptr->c);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 2);
assert_eq!(test.run_no_btf().unwrap(), 3);
}
#[test]
fn relocate_enum() {
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAA };
"#,
target_btf: r#"
enum foo { D = 0xBBBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xBBBBBBBB);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAA);
}
#[test]
fn relocate_enum_signed() {
let kernel_version = KernelVersion::current().unwrap();
if kernel_version < KernelVersion::new(6, 0, 0) {
eprintln!("skipping test on kernel {kernel_version:?}, support for signed enum was added in 6.0.0; see https://github.com/torvalds/linux/commit/6089fb3");
return; return;
} }
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0x7AAAAAAA };
"#,
target_btf: r#"
enum foo { D = -0x7BBBBBBB } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
} }
.build() let mut bpf = BpfLoader::new()
.btf(
with_relocations
.then(|| Btf::parse(crate::RELOC_BTF, Endianness::default()).unwrap())
.as_ref(),
)
.load(crate::RELOC_BPF)
.unwrap(); .unwrap();
assert_eq!(test.run().unwrap() as i64, -0x7BBBBBBBi64); let program: &mut UProbe = bpf.program_mut(program).unwrap().try_into().unwrap();
assert_eq!(test.run_no_btf().unwrap() as i64, -0x7AAAAAAAi64); program.load().unwrap();
} program
.attach(
#[test] Some("trigger_btf_relocations_program"),
fn relocate_enum64() { 0,
let kernel_version = KernelVersion::current().unwrap(); "/proc/self/exe",
if kernel_version < KernelVersion::new(6, 0, 0) { None,
eprintln!("skipping test on kernel {kernel_version:?}, support for enum64 was added in 6.0.0; see https://github.com/torvalds/linux/commit/6089fb3"); )
return;
}
let test = RelocationTest {
local_definition: r#"
enum foo { D = 0xAAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = 0xCCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 0xCCCCCCCCDDDDDDDD);
assert_eq!(test.run_no_btf().unwrap(), 0xAAAAAAAABBBBBBBB);
}
#[test]
fn relocate_enum64_signed() {
let kernel_version = KernelVersion::current().unwrap();
if kernel_version < KernelVersion::new(6, 0, 0) {
eprintln!("skipping test on kernel {kernel_version:?}, support for enum64 was added in 6.0.0; see https://github.com/torvalds/linux/commit/6089fb3");
return;
}
let test = RelocationTest {
local_definition: r#"
enum foo { D = -0xAAAAAAABBBBBBBB };
"#,
target_btf: r#"
enum foo { D = -0xCCCCCCCDDDDDDDD } e1;
"#,
relocation_code: r#"
#define BPF_ENUMVAL_VALUE 1
value = __builtin_preserve_enum_value(*(typeof(enum foo) *)D, BPF_ENUMVAL_VALUE);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap() as i64, -0xCCCCCCCDDDDDDDDi64);
assert_eq!(test.run_no_btf().unwrap() as i64, -0xAAAAAAABBBBBBBBi64);
}
#[test]
fn relocate_pointer() {
let test = RelocationTest {
local_definition: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
target_btf: r#"
struct foo {};
struct bar { struct foo *f; };
"#,
relocation_code: r#"
__u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0};
struct bar* ptr = (struct bar *) &memory;
value = (__u64) __builtin_preserve_access_index(ptr->f);
"#,
}
.build()
.unwrap();
assert_eq!(test.run().unwrap(), 42);
assert_eq!(test.run_no_btf().unwrap(), 42);
}
#[test]
fn relocate_struct_flavors() {
let definition = r#"
struct foo {};
struct bar { struct foo *f; };
struct bar___cafe { struct foo *e; struct foo *f; };
"#;
let relocation_code = r#"
__u8 memory[] = {42, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0};
struct bar* ptr = (struct bar *) &memory;
if (__builtin_preserve_field_info((((typeof(struct bar___cafe) *)0)->e), 2)) {
value = (__u64) __builtin_preserve_access_index(((struct bar___cafe *)ptr)->e);
} else {
value = (__u64) __builtin_preserve_access_index(ptr->f);
}
"#;
let test_no_flavor = RelocationTest {
local_definition: definition,
target_btf: definition,
relocation_code,
}
.build()
.unwrap(); .unwrap();
assert_eq!(test_no_flavor.run_no_btf().unwrap(), 42);
}
/// Utility code for running relocation tests:
/// - Generates the eBPF program using provided local definition and relocation code
/// - Generates the BTF from the target btf code
struct RelocationTest {
/// Data structure definition, local to the eBPF program and embedded in the eBPF bytecode
local_definition: &'static str,
/// Target data structure definition. What the vmlinux would actually contain.
target_btf: &'static str,
/// Code executed by the eBPF program to test the relocation.
/// The format should be:
// __u8 memory[] = { ... };
// __u32 value = BPF_CORE_READ((struct foo *)&memory, ...);
//
// The generated code will be executed by attaching a tracepoint to sched_switch
// and emitting `__u32 value` in a map. See the code template below for more details.
relocation_code: &'static str,
}
impl RelocationTest {
/// Build a RelocationTestRunner
fn build(&self) -> Result<RelocationTestRunner> {
Ok(RelocationTestRunner {
ebpf: self.build_ebpf()?,
btf: self.build_btf()?,
})
}
/// - Generate the source eBPF filling a template
/// - Compile it with clang
fn build_ebpf(&self) -> Result<Vec<u8>> {
use std::io::Read as _;
let Self {
local_definition,
relocation_code,
..
} = self;
let mut stdout = compile(&format!(
r#"
#include <linux/bpf.h>
static long (*bpf_map_update_elem)(void *map, const void *key, const void *value, __u64 flags) = (void *) 2;
{local_definition}
struct {{
int (*type)[BPF_MAP_TYPE_ARRAY];
__u32 *key;
__u64 *value;
int (*max_entries)[1];
}} output_map
__attribute__((section(".maps"), used));
__attribute__ ((noinline)) int bpf_func() {{
__u32 key = 0;
__u64 value = 0;
{relocation_code}
bpf_map_update_elem(&output_map, &key, &value, BPF_ANY);
return 0;
}}
__attribute__((section("tracepoint/bpf_prog"), used))
int bpf_prog(void *ctx) {{
bpf_func();
return 0;
}}
char _license[] __attribute__((section("license"), used)) = "GPL";
"#
))
.context("failed to compile eBPF program")?;
let mut output = Vec::new();
stdout.read_to_end(&mut output)?;
Ok(output)
}
/// - Generate the target BTF source with a mock main()
/// - Compile it with clang
/// - Extract the BTF with llvm-objcopy
fn build_btf(&self) -> Result<Btf> {
use std::io::Read as _;
let Self {
target_btf,
relocation_code,
..
} = self;
// BTF files can be generated and inspected with these commands:
// $ clang -c -g -O2 -target bpf target.c
// $ pahole --btf_encode_detached=target.btf -V target.o
// $ bpftool btf dump file ./target.btf format c
let stdout = compile(&format!(
r#"
#include <linux/bpf.h>
{target_btf}
int main() {{
__u64 value = 0;
// This is needed to make sure to emit BTF for the defined types,
// it could be dead code eliminated if we don't.
{relocation_code};
return value;
}}
"#
))
.context("failed to compile BTF")?;
let mut cmd = Command::new("llvm-objcopy");
cmd.args(["--dump-section", ".BTF=-", "-"])
.stdin(stdout)
.stdout(Stdio::piped());
let mut child = cmd
.spawn()
.with_context(|| format!("failed to spawn {cmd:?}"))?;
let Child { stdout, .. } = &mut child;
let mut stdout = stdout.take().ok_or(anyhow!("failed to open stdout"))?;
let status = child
.wait()
.with_context(|| format!("failed to wait for {cmd:?}"))?;
match status.code() {
Some(code) => match code {
0 => {}
code => bail!("{cmd:?} exited with code {code}"),
},
None => bail!("{cmd:?} terminated by signal"),
}
let mut output = Vec::new(); trigger_btf_relocations_program();
stdout.read_to_end(&mut output)?;
Btf::parse(output.as_slice(), Endianness::default())
.context("failed to parse generated BTF")
}
}
/// Compile an eBPF program and return its bytes.
fn compile(source_code: &str) -> Result<ChildStdout> {
use std::io::Write as _;
let mut cmd = Command::new("clang");
cmd.args([
"-c", "-g", "-O2", "-target", "bpf", "-x", "c", "-", "-o", "-",
])
.stdin(Stdio::piped())
.stdout(Stdio::piped());
let mut child = cmd
.spawn()
.with_context(|| format!("failed to spawn {cmd:?}"))?;
let Child { stdin, stdout, .. } = &mut child;
{
let mut stdin = stdin.take().ok_or(anyhow!("failed to open stdin"))?;
stdin
.write_all(source_code.as_bytes())
.context("failed to write to stdin")?;
}
let stdout = stdout.take().ok_or(anyhow!("failed to open stdout"))?;
let status = child
.wait()
.with_context(|| format!("failed to wait for {cmd:?}"))?;
match status.code() {
Some(code) => match code {
0 => {}
code => bail!("{cmd:?} exited with code {code}"),
},
None => bail!("{cmd:?} terminated by signal"),
}
Ok(stdout)
}
struct RelocationTestRunner {
ebpf: Vec<u8>,
btf: Btf,
}
impl RelocationTestRunner {
/// Run test and return the output value
fn run(&self) -> Result<u64> {
self.run_internal(true).context("Error running with BTF")
}
/// Run without loading btf
fn run_no_btf(&self) -> Result<u64> {
self.run_internal(false)
.context("Error running without BTF")
}
fn run_internal(&self, with_relocations: bool) -> Result<u64> {
let mut loader = BpfLoader::new();
if with_relocations {
loader.btf(Some(&self.btf));
} else {
loader.btf(None);
}
let mut bpf = loader.load(&self.ebpf).context("Loading eBPF failed")?;
let program: &mut TracePoint = bpf
.program_mut("bpf_prog")
.context("bpf_prog not found")?
.try_into()
.context("program not a tracepoint")?;
program.load().context("Loading tracepoint failed")?;
// Attach to sched_switch and wait some time to make sure it executed at least once
program
.attach("sched", "sched_switch")
.context("attach failed")?;
sleep(Duration::from_millis(1000));
// To inspect the loaded eBPF bytecode, increse the timeout and run:
// $ sudo bpftool prog dump xlated name bpf_prog
let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap(); let output_map: Array<_, u64> = bpf.take_map("output_map").unwrap().try_into().unwrap();
let key = 0; let key = 0;
output_map.get(&key, 0).context("Getting key 0 failed") assert_eq!(output_map.get(&key, 0).unwrap(), expected)
} }
#[no_mangle]
#[inline(never)]
pub extern "C" fn trigger_btf_relocations_program() {
core::hint::black_box(trigger_btf_relocations_program);
} }

@ -107,8 +107,9 @@ fn use_map_with_rbpf() {
} }
} }
#[track_caller]
fn bpf_map_update_elem_multimap(map: u64, key: u64, value: u64, _: u64, _: u64) -> u64 { fn bpf_map_update_elem_multimap(map: u64, key: u64, value: u64, _: u64, _: u64) -> u64 {
assert!(map == 0xCAFE00 || map == 0xCAFE01); assert_matches!(map, 0xCAFE00 | 0xCAFE01);
let key = *unsafe { (key as usize as *const u32).as_ref().unwrap() }; let key = *unsafe { (key as usize as *const u32).as_ref().unwrap() };
let value = *unsafe { (value as usize as *const u64).as_ref().unwrap() }; let value = *unsafe { (value as usize as *const u64).as_ref().unwrap() };
assert_eq!(key, 0); assert_eq!(key, 0);

@ -1,12 +1,13 @@
#!/bin/sh #!/usr/bin/env bash
set -e set -ex
if [ "$(uname -s)" = "Darwin" ]; then if [ "$(uname -s)" = "Darwin" ]; then
export PATH="$(dirname $(brew list gnu-getopt | grep "bin/getopt$")):$PATH" PATH="$(dirname "$(brew list gnu-getopt | grep "bin/getopt$")"):$PATH"
export PATH
fi fi
AYA_SOURCE_DIR="$(realpath $(dirname $0)/..)" AYA_SOURCE_DIR="$(realpath "$(dirname "$0")"/..)"
# Temporary directory for tests to use. # Temporary directory for tests to use.
AYA_TMPDIR="${AYA_SOURCE_DIR}/.tmp" AYA_TMPDIR="${AYA_SOURCE_DIR}/.tmp"
@ -108,7 +109,7 @@ ssh_authorized_keys:
- ${pub_key} - ${pub_key}
EOF EOF
$AYA_SOURCE_DIR/test/cloud-localds "${AYA_TMPDIR}/seed.img" "${AYA_TMPDIR}/user-data.yaml" "${AYA_TMPDIR}/metadata.yaml" "$AYA_SOURCE_DIR"/test/cloud-localds "${AYA_TMPDIR}/seed.img" "${AYA_TMPDIR}/user-data.yaml" "${AYA_TMPDIR}/metadata.yaml"
case "${AYA_GUEST_ARCH}" in case "${AYA_GUEST_ARCH}" in
x86_64) x86_64)
QEMU=qemu-system-x86_64 QEMU=qemu-system-x86_64
@ -129,7 +130,7 @@ EOF
QEMU=qemu-system-aarch64 QEMU=qemu-system-aarch64
machine="virt" machine="virt"
cpu="cortex-a57" cpu="cortex-a57"
uefi="-drive file=${AARCH64_UEFI},if=pflash,format=raw,readonly=on" uefi=("-drive" "file=${AARCH64_UEFI},if=pflash,format=raw,readonly=on")
if [ "${AYA_HOST_ARCH}" = "${AYA_GUEST_ARCH}" ]; then if [ "${AYA_HOST_ARCH}" = "${AYA_GUEST_ARCH}" ]; then
if [ -c /dev/kvm ]; then if [ -c /dev/kvm ]; then
machine="${machine},accel=kvm" machine="${machine},accel=kvm"
@ -153,10 +154,8 @@ EOF
if [ ! -f "${AYA_IMGDIR}/vm.qcow2" ]; then if [ ! -f "${AYA_IMGDIR}/vm.qcow2" ]; then
echo "Creating VM image" echo "Creating VM image"
qemu-img create -F qcow2 -f qcow2 -o backing_file="${AYA_IMGDIR}/${AYA_TEST_IMAGE}.${AYA_GUEST_ARCH}.qcow2" "${AYA_IMGDIR}/vm.qcow2" || return 1 qemu-img create -F qcow2 -f qcow2 -o backing_file="${AYA_IMGDIR}/${AYA_TEST_IMAGE}.${AYA_GUEST_ARCH}.qcow2" "${AYA_IMGDIR}/vm.qcow2" || return 1
CACHED_VM=0
else else
echo "Reusing existing VM image" echo "Reusing existing VM image"
CACHED_VM=1
fi fi
$QEMU \ $QEMU \
-machine "${machine}" \ -machine "${machine}" \
@ -169,7 +168,7 @@ EOF
-pidfile "${AYA_TMPDIR}/vm.pid" \ -pidfile "${AYA_TMPDIR}/vm.pid" \
-device virtio-net-pci,netdev=net0 \ -device virtio-net-pci,netdev=net0 \
-netdev user,id=net0,hostfwd=tcp::2222-:22 \ -netdev user,id=net0,hostfwd=tcp::2222-:22 \
$uefi \ "${uefi[@]}" \
-drive if=virtio,format=qcow2,file="${AYA_IMGDIR}/vm.qcow2" \ -drive if=virtio,format=qcow2,file="${AYA_IMGDIR}/vm.qcow2" \
-drive if=virtio,format=raw,file="${AYA_TMPDIR}/seed.img" || return 1 -drive if=virtio,format=raw,file="${AYA_TMPDIR}/seed.img" || return 1
@ -177,7 +176,7 @@ EOF
echo "Waiting for SSH on port 2222..." echo "Waiting for SSH on port 2222..."
retry=0 retry=0
max_retries=300 max_retries=300
while ! ssh -q -F "${AYA_TMPDIR}/ssh_config" -o ConnectTimeout=1 -i "${AYA_TMPDIR}/test_rsa" ${AYA_SSH_USER}@localhost -p 2222 echo "Hello VM"; do while ! ssh -q -F "${AYA_TMPDIR}/ssh_config" -o ConnectTimeout=1 -i "${AYA_TMPDIR}/test_rsa" "${AYA_SSH_USER}"@localhost -p 2222 echo "Hello VM"; do
retry=$((retry+1)) retry=$((retry+1))
if [ ${retry} -gt ${max_retries} ]; then if [ ${retry} -gt ${max_retries} ]; then
echo "Unable to connect to VM" echo "Unable to connect to VM"
@ -192,11 +191,7 @@ EOF
exec_vm sudo dnf config-manager --set-enabled updates-testing exec_vm sudo dnf config-manager --set-enabled updates-testing
exec_vm sudo dnf config-manager --set-enabled updates-testing-modular exec_vm sudo dnf config-manager --set-enabled updates-testing-modular
echo "Installing dependencies" echo "Installing dependencies"
exec_vm sudo dnf install -qy bpftool llvm llvm-devel clang clang-devel zlib-devel git exec_vm sudo dnf install -qy bpftool
exec_vm 'curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- \
-y --profile minimal --default-toolchain nightly --component rust-src --component clippy'
exec_vm 'echo source ~/.cargo/env >> ~/.bashrc'
exec_vm cargo install bpf-linker --git https://github.com/aya-rs/bpf-linker.git
} }
scp_vm() { scp_vm() {
@ -209,14 +204,14 @@ scp_vm() {
} }
rsync_vm() { rsync_vm() {
rsync -a -e "ssh -p 2222 -F ${AYA_TMPDIR}/ssh_config -i ${AYA_TMPDIR}/test_rsa" $1 $AYA_SSH_USER@localhost: rsync -a -e "ssh -p 2222 -F ${AYA_TMPDIR}/ssh_config -i ${AYA_TMPDIR}/test_rsa" "$1" "$AYA_SSH_USER"@localhost:
} }
exec_vm() { exec_vm() {
ssh -q -F "${AYA_TMPDIR}/ssh_config" \ ssh -q -F "${AYA_TMPDIR}/ssh_config" \
-i "${AYA_TMPDIR}/test_rsa" \ -i "${AYA_TMPDIR}/test_rsa" \
-p 2222 \ -p 2222 \
${AYA_SSH_USER}@localhost \ "${AYA_SSH_USER}"@localhost \
"$@" "$@"
} }
@ -229,8 +224,7 @@ stop_vm() {
} }
cleanup_vm() { cleanup_vm() {
stop_vm if ! stop_vm; then
if [ "$?" != "0" ]; then
rm -f "${AYA_IMGDIR}/vm.qcow2" rm -f "${AYA_IMGDIR}/vm.qcow2"
fi fi
} }
@ -239,12 +233,11 @@ start_vm
trap cleanup_vm EXIT trap cleanup_vm EXIT
# make sure we always use fresh sources (also see comment at the end) # make sure we always use fresh sources (also see comment at the end)
exec_vm "rm -rf aya/*" rsync_vm "$*"
rsync_vm "--exclude=target --exclude=.tmp $AYA_SOURCE_DIR"
exec_vm "cd aya; cargo xtask integration-test $*" exec_vm "find $* -type f -executable -print0 | xargs -0 -I {} sudo {} --test-threads=1"
# we rm and sync but it doesn't seem to work reliably - I guess we could sleep a # we rm and sync but it doesn't seem to work reliably - I guess we could sleep a
# few seconds after but ain't nobody got time for that. Instead we also rm # few seconds after but ain't nobody got time for that. Instead we also rm
# before rsyncing. # before rsyncing.
exec_vm "rm -rf aya/*; sync" exec_vm "rm -rf $*; sync"

Loading…
Cancel
Save